py3: convert to next() function...

Author: timeless
Changeset: r29216:ead25aa2 (default branch)
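
For context on the change itself: Python 3 removed the iterator method spelling obj.next(); the next() builtin, available since Python 2.6, works on both interpreters by dispatching to __next__() on Python 3 and next() on Python 2. A minimal sketch of the conversion (illustrative values; only wordgen comes from the diff below):

import itertools

wordgen = itertools.cycle(['synthetic', 'words'])

# Python 2 only -- this method spelling was removed in Python 3:
#     word = wordgen.next()

# Portable spelling, used after this change:
word = next(wordgen)
print(word)  # -> synthetic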

Note: the full change is too big for the viewer and was truncated; the second file below is shown only in part.

--- a/synthrepo.py
+++ b/synthrepo.py
@@ -1,516 +1,516 @@
 # synthrepo.py - repo synthesis
 #
 # Copyright 2012 Facebook
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''synthesize structurally interesting change history

 This extension is useful for creating a repository with properties
 that are statistically similar to an existing repository. During
 analysis, a simple probability table is constructed from the history
 of an existing repository. During synthesis, these properties are
 reconstructed.

 Properties that are analyzed and synthesized include the following:

 - Lines added or removed when an existing file is modified
 - Number and sizes of files added
 - Number of files removed
 - Line lengths
 - Topological distance to parent changeset(s)
 - Probability of a commit being a merge
 - Probability of a newly added file being added to a new directory
 - Interarrival time, and time zone, of commits
 - Number of files in each directory

 A few obvious properties that are not currently handled realistically:

 - Merges are treated as regular commits with two parents, which is not
   realistic
 - Modifications are not treated as operations on hunks of lines, but
   as insertions and deletions of randomly chosen single lines
 - Committer ID (always random)
 - Executability of files
 - Symlinks and binary files are ignored
 '''

 from __future__ import absolute_import
 import bisect
 import collections
 import itertools
 import json
 import os
 import random
 import sys
 import time

 from mercurial.i18n import _
 from mercurial.node import (
     nullid,
     nullrev,
     short,
 )
 from mercurial import (
     cmdutil,
     context,
     error,
     hg,
     patch,
     scmutil,
     util,
 )

 # Note for extension authors: ONLY specify testedwith = 'internal' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'internal'

 cmdtable = {}
 command = cmdutil.command(cmdtable)

 newfile = set(('new fi', 'rename', 'copy f', 'copy t'))

 def zerodict():
     return collections.defaultdict(lambda: 0)

 def roundto(x, k):
     if x > k * 2:
         return int(round(x / float(k)) * k)
     return int(round(x))

 def parsegitdiff(lines):
     filename, mar, lineadd, lineremove = None, None, zerodict(), 0
     binary = False
     for line in lines:
         start = line[:6]
         if start == 'diff -':
             if filename:
                 yield filename, mar, lineadd, lineremove, binary
             mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
             filename = patch.gitre.match(line).group(1)
         elif start in newfile:
             mar = 'a'
         elif start == 'GIT bi':
             binary = True
         elif start == 'delete':
             mar = 'r'
         elif start:
             s = start[0]
             if s == '-' and not line.startswith('--- '):
                 lineremove += 1
             elif s == '+' and not line.startswith('+++ '):
                 lineadd[roundto(len(line) - 1, 5)] += 1
     if filename:
         yield filename, mar, lineadd, lineremove, binary

 @command('analyze',
          [('o', 'output', '', _('write output to given file'), _('FILE')),
           ('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
          _('hg analyze'), optionalrepo=True)
 def analyze(ui, repo, *revs, **opts):
     '''create a simple model of a repository to use for later synthesis

     This command examines every changeset in the given range (or all
     of history if none are specified) and creates a simple statistical
     model of the history of the repository. It also measures the directory
     structure of the repository as checked out.

     The model is written out to a JSON file, and can be used by
     :hg:`synthesize` to create or augment a repository with synthetic
     commits that have a structure that is statistically similar to the
     analyzed repository.
     '''
     root = repo.root
     if not root.endswith(os.path.sep):
         root += os.path.sep

     revs = list(revs)
     revs.extend(opts['rev'])
     if not revs:
         revs = [':']

     output = opts['output']
     if not output:
         output = os.path.basename(root) + '.json'

     if output == '-':
         fp = sys.stdout
     else:
         fp = open(output, 'w')

     # Always obtain file counts of each directory in the given root directory.
     def onerror(e):
         ui.warn(_('error walking directory structure: %s\n') % e)

     dirs = {}
     rootprefixlen = len(root)
     for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
         dirpathfromroot = dirpath[rootprefixlen:]
         dirs[dirpathfromroot] = len(filenames)
         if '.hg' in dirnames:
             dirnames.remove('.hg')

     lineschanged = zerodict()
     children = zerodict()
     p1distance = zerodict()
     p2distance = zerodict()
     linesinfilesadded = zerodict()
     fileschanged = zerodict()
     filesadded = zerodict()
     filesremoved = zerodict()
     linelengths = zerodict()
     interarrival = zerodict()
     parents = zerodict()
     dirsadded = zerodict()
     tzoffset = zerodict()

     # If a mercurial repo is available, also model the commit history.
     if repo:
         revs = scmutil.revrange(repo, revs)
         revs.sort()

         progress = ui.progress
         _analyzing = _('analyzing')
         _changesets = _('changesets')
         _total = len(revs)

         for i, rev in enumerate(revs):
             progress(_analyzing, i, unit=_changesets, total=_total)
             ctx = repo[rev]
             pl = ctx.parents()
             pctx = pl[0]
             prev = pctx.rev()
             children[prev] += 1
             p1distance[rev - prev] += 1
             parents[len(pl)] += 1
             tzoffset[ctx.date()[1]] += 1
             if len(pl) > 1:
                 p2distance[rev - pl[1].rev()] += 1
             if prev == rev - 1:
                 lastctx = pctx
             else:
                 lastctx = repo[rev - 1]
             if lastctx.rev() != nullrev:
                 timedelta = ctx.date()[0] - lastctx.date()[0]
                 interarrival[roundto(timedelta, 300)] += 1
             diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), [])
             fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
             for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
                 if isbin:
                     continue
                 added = sum(lineadd.itervalues(), 0)
                 if mar == 'm':
                     if added and lineremove:
                         lineschanged[roundto(added, 5),
                                      roundto(lineremove, 5)] += 1
                     filechanges += 1
                 elif mar == 'a':
                     fileadds += 1
                     if '/' in filename:
                         filedir = filename.rsplit('/', 1)[0]
                         if filedir not in pctx.dirs():
                             diradds += 1
                     linesinfilesadded[roundto(added, 5)] += 1
                 elif mar == 'r':
                     fileremoves += 1
                 for length, count in lineadd.iteritems():
                     linelengths[length] += count
             fileschanged[filechanges] += 1
             filesadded[fileadds] += 1
             dirsadded[diradds] += 1
             filesremoved[fileremoves] += 1

     invchildren = zerodict()

     for rev, count in children.iteritems():
         invchildren[count] += 1

     if output != '-':
         ui.status(_('writing output to %s\n') % output)

     def pronk(d):
         return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)

     json.dump({'revs': len(revs),
                'initdirs': pronk(dirs),
                'lineschanged': pronk(lineschanged),
                'children': pronk(invchildren),
                'fileschanged': pronk(fileschanged),
                'filesadded': pronk(filesadded),
                'linesinfilesadded': pronk(linesinfilesadded),
                'dirsadded': pronk(dirsadded),
                'filesremoved': pronk(filesremoved),
                'linelengths': pronk(linelengths),
                'parents': pronk(parents),
                'p1distance': pronk(p1distance),
                'p2distance': pronk(p2distance),
                'interarrival': pronk(interarrival),
                'tzoffset': pronk(tzoffset),
                },
               fp)
     fp.close()

 @command('synthesize',
          [('c', 'count', 0, _('create given number of commits'), _('COUNT')),
           ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
           ('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
          _('hg synthesize [OPTION].. DESCFILE'))
 def synthesize(ui, repo, descpath, **opts):
     '''synthesize commits based on a model of an existing repository

     The model must have been generated by :hg:`analyze`. Commits will
     be generated randomly according to the probabilities described in
     the model. If --initfiles is set, the repository will be seeded with
     the given number of files following the modeled repository's directory
     structure.

     When synthesizing new content, commit descriptions, and user
     names, words will be chosen randomly from a dictionary that is
     presumed to contain one word per line. Use --dict to specify the
     path to an alternate dictionary to use.
     '''
     try:
         fp = hg.openpath(ui, descpath)
     except Exception as err:
         raise error.Abort('%s: %s' % (descpath, err[0].strerror))
     desc = json.load(fp)
     fp.close()

     def cdf(l):
         if not l:
             return [], []
         vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
         t = float(sum(probs, 0))
         s, cdfs = 0, []
         for v in probs:
             s += v
             cdfs.append(s / t)
         return vals, cdfs

     lineschanged = cdf(desc['lineschanged'])
     fileschanged = cdf(desc['fileschanged'])
     filesadded = cdf(desc['filesadded'])
     dirsadded = cdf(desc['dirsadded'])
     filesremoved = cdf(desc['filesremoved'])
     linelengths = cdf(desc['linelengths'])
     parents = cdf(desc['parents'])
     p1distance = cdf(desc['p1distance'])
     p2distance = cdf(desc['p2distance'])
     interarrival = cdf(desc['interarrival'])
     linesinfilesadded = cdf(desc['linesinfilesadded'])
     tzoffset = cdf(desc['tzoffset'])

     dictfile = opts.get('dict') or '/usr/share/dict/words'
     try:
         fp = open(dictfile, 'rU')
     except IOError as err:
         raise error.Abort('%s: %s' % (dictfile, err.strerror))
     words = fp.read().splitlines()
     fp.close()

     initdirs = {}
     if desc['initdirs']:
         for k, v in desc['initdirs']:
             initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
         initdirs = renamedirs(initdirs, words)
     initdirscdf = cdf(initdirs)

     def pick(cdf):
         return cdf[0][bisect.bisect_left(cdf[1], random.random())]

     def pickpath():
         return os.path.join(pick(initdirscdf), random.choice(words))

     def makeline(minimum=0):
         total = max(minimum, pick(linelengths))
         c, l = 0, []
         while c < total:
             w = random.choice(words)
             c += len(w) + 1
             l.append(w)
         return ' '.join(l)

     wlock = repo.wlock()
     lock = repo.lock()

     nevertouch = set(('.hgsub', '.hgignore', '.hgtags'))

     progress = ui.progress
     _synthesizing = _('synthesizing')
     _files = _('initial files')
     _changesets = _('changesets')

     # Synthesize a single initial revision adding files to the repo according
     # to the modeled directory structure.
     initcount = int(opts['initfiles'])
     if initcount and initdirs:
         pctx = repo[None].parents()[0]
         dirs = set(pctx.dirs())
         files = {}

         def validpath(path):
             # Don't pick filenames which are already directory names.
             if path in dirs:
                 return False
             # Don't pick directories which were used as file names.
             while path:
                 if path in files:
                     return False
                 path = os.path.dirname(path)
             return True

         for i in xrange(0, initcount):
             ui.progress(_synthesizing, i, unit=_files, total=initcount)

             path = pickpath()
             while not validpath(path):
                 path = pickpath()
             data = '%s contents\n' % path
             files[path] = context.memfilectx(repo, path, data)
             dir = os.path.dirname(path)
             while dir and dir not in dirs:
                 dirs.add(dir)
                 dir = os.path.dirname(dir)

         def filectxfn(repo, memctx, path):
             return files[path]

         ui.progress(_synthesizing, None)
         message = 'synthesized wide repo with %d files' % (len(files),)
         mc = context.memctx(repo, [pctx.node(), nullid], message,
                             files.iterkeys(), filectxfn, ui.username(),
                             '%d %d' % util.makedate())
         initnode = mc.commit()
         if ui.debugflag:
             hexfn = hex
         else:
             hexfn = short
         ui.status(_('added commit %s with %d files\n')
                   % (hexfn(initnode), len(files)))

     # Synthesize incremental revisions to the repository, adding repo depth.
     count = int(opts['count'])
     heads = set(map(repo.changelog.rev, repo.heads()))
     for i in xrange(count):
         progress(_synthesizing, i, unit=_changesets, total=count)

         node = repo.changelog.node
         revs = len(repo)

         def pickhead(heads, distance):
             if heads:
                 lheads = sorted(heads)
                 rev = revs - min(pick(distance), revs)
                 if rev < lheads[-1]:
                     rev = lheads[bisect.bisect_left(lheads, rev)]
                 else:
                     rev = lheads[-1]
                 return rev, node(rev)
             return nullrev, nullid

         r1 = revs - min(pick(p1distance), revs)
         p1 = node(r1)

         # the number of heads will grow without bound if we use a pure
         # model, so artificially constrain their proliferation
         toomanyheads = len(heads) > random.randint(1, 20)
         if p2distance[0] and (pick(parents) == 2 or toomanyheads):
             r2, p2 = pickhead(heads.difference([r1]), p2distance)
         else:
             r2, p2 = nullrev, nullid

         pl = [p1, p2]
         pctx = repo[r1]
         mf = pctx.manifest()
         mfk = mf.keys()
         changes = {}
         if mfk:
             for __ in xrange(pick(fileschanged)):
                 for __ in xrange(10):
                     fctx = pctx.filectx(random.choice(mfk))
                     path = fctx.path()
                     if not (path in nevertouch or fctx.isbinary() or
                             'l' in fctx.flags()):
                         break
                 lines = fctx.data().splitlines()
                 add, remove = pick(lineschanged)
                 for __ in xrange(remove):
                     if not lines:
                         break
                     del lines[random.randrange(0, len(lines))]
                 for __ in xrange(add):
                     lines.insert(random.randint(0, len(lines)), makeline())
                 path = fctx.path()
                 changes[path] = context.memfilectx(repo, path,
                                                    '\n'.join(lines) + '\n')
             for __ in xrange(pick(filesremoved)):
                 path = random.choice(mfk)
                 for __ in xrange(10):
                     path = random.choice(mfk)
                     if path not in changes:
                         changes[path] = None
                         break
         if filesadded:
             dirs = list(pctx.dirs())
             dirs.insert(0, '')
         for __ in xrange(pick(filesadded)):
             pathstr = ''
             while pathstr in dirs:
                 path = [random.choice(dirs)]
                 if pick(dirsadded):
                     path.append(random.choice(words))
                 path.append(random.choice(words))
                 pathstr = '/'.join(filter(None, path))
             data = '\n'.join(makeline()
                              for __ in xrange(pick(linesinfilesadded))) + '\n'
             changes[pathstr] = context.memfilectx(repo, pathstr, data)
         def filectxfn(repo, memctx, path):
             return changes[path]
         if not changes:
             continue
         if revs:
             date = repo['tip'].date()[0] + pick(interarrival)
         else:
             date = time.time() - (86400 * count)
         # dates in mercurial must be positive, fit in 32-bit signed integers.
         date = min(0x7fffffff, max(0, date))
         user = random.choice(words) + '@' + random.choice(words)
         mc = context.memctx(repo, pl, makeline(minimum=2),
                             sorted(changes.iterkeys()),
                             filectxfn, user, '%d %d' % (date, pick(tzoffset)))
         newnode = mc.commit()
         heads.add(repo.changelog.rev(newnode))
         heads.discard(r1)
         heads.discard(r2)

     lock.release()
     wlock.release()

 def renamedirs(dirs, words):
     '''Randomly rename the directory names in the per-dir file count dict.'''
     wordgen = itertools.cycle(words)
     replacements = {'': ''}
     def rename(dirpath):
         '''Recursively rename the directory and all path prefixes.

         The mapping from path to renamed path is stored for all path prefixes
         as in dynamic programming, ensuring linear runtime and consistent
         renaming regardless of iteration order through the model.
         '''
         if dirpath in replacements:
             return replacements[dirpath]
         head, _ = os.path.split(dirpath)
         if head:
             head = rename(head)
         else:
             head = ''
-        renamed = os.path.join(head, wordgen.next())
+        renamed = os.path.join(head, next(wordgen))
         replacements[dirpath] = renamed
         return renamed
     result = []
     for dirpath, count in dirs.iteritems():
         result.append([rename(dirpath.lstrip(os.sep)), count])
     return result
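
One piece of synthrepo.py worth calling out: analyze writes plain frequency tables to JSON, and synthesize turns each table into a cumulative distribution with cdf() and draws weighted samples from it in pick() using bisect. A self-contained sketch of the same technique (the frequencies here are made up, not from a real model file):

import bisect
import random

def cdf(pairs):
    # pairs: [(value, frequency), ...], like the tables analyze emits
    vals, freqs = zip(*sorted(pairs, key=lambda x: x[1], reverse=True))
    total = float(sum(freqs))
    running, cum = 0, []
    for f in freqs:
        running += f
        cum.append(running / total)
    return vals, cum

def pick(dist):
    vals, cum = dist
    # random.random() lands in [0, 1); bisect_left finds the first
    # bucket whose cumulative probability covers it
    return vals[bisect.bisect_left(cum, random.random())]

linelengths = cdf([(10, 500), (35, 300), (72, 200)])
print(pick(linelengths))  # 10 about half the time, 35 ~30%, 72 ~20%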
--- a/manpage.py
+++ b/manpage.py
@@ -1,1115 +1,1115 @@
 # -*- coding: utf-8 -*-
 # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $
 # Author: Engelbert Gruber <grubert@users.sourceforge.net>
 # Copyright: This module is put into the public domain.

 """
 Simple man page writer for reStructuredText.

 Man pages (short for "manual pages") contain system documentation on unix-like
 systems. The pages are grouped in numbered sections:

  1 executable programs and shell commands
  2 system calls
  3 library functions
  4 special files
  5 file formats
  6 games
  7 miscellaneous
  8 system administration

 Man pages are written in *troff*, a text file formatting system.

 See http://www.tldp.org/HOWTO/Man-Page for a start.

 Man pages have no subsections, only parts.
 Standard parts

   NAME ,
   SYNOPSIS ,
   DESCRIPTION ,
   OPTIONS ,
   FILES ,
   SEE ALSO ,
   BUGS ,

 and

   AUTHOR .

 A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
 by the command whatis or apropos.

 """
 from __future__ import absolute_import

 __docformat__ = 'reStructuredText'

 import inspect
 import re

 from docutils import (
     languages,
     nodes,
     writers,
 )
 try:
     import roman
 except ImportError:
     from docutils.utils import roman
 import inspect

 FIELD_LIST_INDENT = 7
 DEFINITION_LIST_INDENT = 7
 OPTION_LIST_INDENT = 7
 BLOCKQOUTE_INDENT = 3.5

 # Define two macros so man/roff can calculate the
 # indent/unindent margins by itself
 MACRO_DEF = (r""".
 .nr rst2man-indent-level 0
 .
 .de1 rstReportMargin
 \\$1 \\n[an-margin]
 level \\n[rst2man-indent-level]
 level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
 -
 \\n[rst2man-indent0]
 \\n[rst2man-indent1]
 \\n[rst2man-indent2]
 ..
 .de1 INDENT
 .\" .rstReportMargin pre:
 . RS \\$1
 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
 . nr rst2man-indent-level +1
 .\" .rstReportMargin post:
 ..
 .de UNINDENT
 . RE
 .\" indent \\n[an-margin]
 .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
 .nr rst2man-indent-level -1
 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
 .in \\n[rst2man-indent\\n[rst2man-indent-level]]u
 ..
 """)

 class Writer(writers.Writer):

     supported = ('manpage')
     """Formats this writer supports."""

     output = None
     """Final translated form of `document`."""

     def __init__(self):
         writers.Writer.__init__(self)
         self.translator_class = Translator

     def translate(self):
         visitor = self.translator_class(self.document)
         self.document.walkabout(visitor)
         self.output = visitor.astext()


 class Table(object):
     def __init__(self):
         self._rows = []
         self._options = ['center']
         self._tab_char = '\t'
         self._coldefs = []
     def new_row(self):
         self._rows.append([])
     def append_separator(self, separator):
         """Append the separator for table head."""
         self._rows.append([separator])
     def append_cell(self, cell_lines):
         """cell_lines is an array of lines"""
         start = 0
         if len(cell_lines) > 0 and cell_lines[0] == '.sp\n':
             start = 1
         self._rows[-1].append(cell_lines[start:])
         if len(self._coldefs) < len(self._rows[-1]):
             self._coldefs.append('l')
     def _minimize_cell(self, cell_lines):
         """Remove leading and trailing blank and ``.sp`` lines"""
         while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
             del cell_lines[0]
         while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
             del cell_lines[-1]
     def as_list(self):
         text = ['.TS\n']
         text.append(' '.join(self._options) + ';\n')
         text.append('|%s|.\n' % ('|'.join(self._coldefs)))
         for row in self._rows:
             # row = array of cells. cell = array of lines.
             text.append('_\n')  # line above
             text.append('T{\n')
             for i in range(len(row)):
                 cell = row[i]
                 self._minimize_cell(cell)
                 text.extend(cell)
                 if not text[-1].endswith('\n'):
                     text[-1] += '\n'
                 if i < len(row) - 1:
                     text.append('T}'+self._tab_char+'T{\n')
                 else:
                     text.append('T}\n')
         text.append('_\n')
         text.append('.TE\n')
         return text

 class Translator(nodes.NodeVisitor):
     """"""

     words_and_spaces = re.compile(r'\S+| +|\n')
     document_start = """Man page generated from reStructuredText."""

     def __init__(self, document):
         nodes.NodeVisitor.__init__(self, document)
         self.settings = settings = document.settings
         lcode = settings.language_code
         arglen = len(inspect.getargspec(languages.get_language)[0])
         if arglen == 2:
             self.language = languages.get_language(lcode,
                                                    self.document.reporter)
         else:
             self.language = languages.get_language(lcode)
         self.head = []
         self.body = []
         self.foot = []
         self.section_level = 0
         self.context = []
         self.topic_class = ''
         self.colspecs = []
         self.compact_p = 1
         self.compact_simple = None
         # the list style "*" bullet or "#" numbered
         self._list_char = []
         # writing the header .TH and .SH NAME is postponed after
         # docinfo.
         self._docinfo = {
             "title" : "", "title_upper": "",
             "subtitle" : "",
             "manual_section" : "", "manual_group" : "",
             "author" : [],
             "date" : "",
             "copyright" : "",
             "version" : "",
             }
         self._docinfo_keys = []   # a list to keep the sequence as in source.
         self._docinfo_names = {}  # to get name from text not normalized.
         self._in_docinfo = None
         self._active_table = None
         self._in_literal = False
         self.header_written = 0
         self._line_block = 0
         self.authors = []
         self.section_level = 0
         self._indent = [0]
         # central definition of simple processing rules
         # what to output on : visit, depart
         # Do not use paragraph requests ``.PP`` because these set indentation.
         # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
         #
         # Fonts are put on a stack, the top one is used.
         # ``.ft P`` or ``\\fP`` pop from stack.
         # ``B`` bold, ``I`` italic, ``R`` roman should be available.
         # Hopefully ``C`` courier too.
         self.defs = {
             'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
             'definition_list_item' : ('.TP', ''),
             'field_name' : ('.TP\n.B ', '\n'),
             'literal' : ('\\fB', '\\fP'),
             'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),

             'option_list_item' : ('.TP\n', ''),

             'reference' : (r'\%', r'\:'),
             'emphasis': ('\\fI', '\\fP'),
             'strong' : ('\\fB', '\\fP'),
             'term' : ('\n.B ', '\n'),
             'title_reference' : ('\\fI', '\\fP'),

             'topic-title' : ('.SS ',),
             'sidebar-title' : ('.SS ',),

             'problematic' : ('\n.nf\n', '\n.fi\n'),
             }
         # NOTE don't specify the newline before a dot-command, but ensure
         # it is there.

     def comment_begin(self, text):
         """Return commented version of the passed text WITHOUT end of
         line/comment."""
         prefix = '.\\" '
         out_text = ''.join(
             [(prefix + in_line + '\n')
             for in_line in text.split('\n')])
         return out_text

     def comment(self, text):
         """Return commented version of the passed text."""
         return self.comment_begin(text)+'.\n'

     def ensure_eol(self):
         """Ensure the last line in body is terminated by new line."""
         if self.body[-1][-1] != '\n':
             self.body.append('\n')

     def astext(self):
         """Return the final formatted document as a string."""
         if not self.header_written:
             # ensure we get a ".TH" as viewers require it.
             self.head.append(self.header())
         # filter body
         for i in xrange(len(self.body) - 1, 0, -1):
             # remove superfluous vertical gaps.
             if self.body[i] == '.sp\n':
                 if self.body[i - 1][:4] in ('.BI ','.IP '):
                     self.body[i] = '.\n'
                 elif (self.body[i - 1][:3] == '.B ' and
                       self.body[i - 2][:4] == '.TP\n'):
                     self.body[i] = '.\n'
                 elif (self.body[i - 1] == '\n' and
                       self.body[i - 2][0] != '.' and
                       (self.body[i - 3][:7] == '.TP\n.B '
                        or self.body[i - 3][:4] == '\n.B ')
                       ):
                     self.body[i] = '.\n'
         return ''.join(self.head + self.body + self.foot)

     def deunicode(self, text):
         text = text.replace(u'\xa0', '\\ ')
         text = text.replace(u'\u2020', '\\(dg')
         return text

     def visit_Text(self, node):
         text = node.astext()
         text = text.replace('\\','\\e')
         replace_pairs = [
             (u'-', ur'\-'),
             (u'\'', ur'\(aq'),
             (u'´', ur'\''),
             (u'`', ur'\(ga'),
             ]
         for (in_char, out_markup) in replace_pairs:
             text = text.replace(in_char, out_markup)
         # unicode
         text = self.deunicode(text)
         if self._in_literal:
             # prevent interpretation of "." at line start
             if text[0] == '.':
                 text = '\\&' + text
             text = text.replace('\n.', '\n\\&.')
         self.body.append(text)

     def depart_Text(self, node):
         pass

     def list_start(self, node):
         class enum_char(object):
             enum_style = {
                 'bullet' : '\\(bu',
                 'emdash' : '\\(em',
                 }

             def __init__(self, style):
                 self._style = style
                 if 'start' in node:
                     self._cnt = node['start'] - 1
                 else:
                     self._cnt = 0
                 self._indent = 2
                 if style == 'arabic':
                     # indentation depends on number of children
                     # and start value.
                     self._indent = len(str(len(node.children)))
                     self._indent += len(str(self._cnt)) + 1
                 elif style == 'loweralpha':
                     self._cnt += ord('a') - 1
                     self._indent = 3
                 elif style == 'upperalpha':
                     self._cnt += ord('A') - 1
                     self._indent = 3
                 elif style.endswith('roman'):
                     self._indent = 5

             def next(self):
                 if self._style == 'bullet':
                     return self.enum_style[self._style]
                 elif self._style == 'emdash':
                     return self.enum_style[self._style]
                 self._cnt += 1
                 # TODO add prefix postfix
                 if self._style == 'arabic':
                     return "%d." % self._cnt
                 elif self._style in ('loweralpha', 'upperalpha'):
                     return "%c." % self._cnt
                 elif self._style.endswith('roman'):
                     res = roman.toRoman(self._cnt) + '.'
                     if self._style.startswith('upper'):
                         return res.upper()
                     return res.lower()
                 else:
                     return "%d." % self._cnt
             def get_width(self):
                 return self._indent
             def __repr__(self):
                 return 'enum_style-%s' % list(self._style)

         if 'enumtype' in node:
             self._list_char.append(enum_char(node['enumtype']))
         else:
             self._list_char.append(enum_char('bullet'))
         if len(self._list_char) > 1:
             # indent nested lists
             self.indent(self._list_char[-2].get_width())
         else:
             self.indent(self._list_char[-1].get_width())

     def list_end(self):
         self.dedent()
         self._list_char.pop()

     def header(self):
         tmpl = (".TH %(title_upper)s %(manual_section)s"
                 " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
                 ".SH NAME\n"
                 "%(title)s \- %(subtitle)s\n")
         return tmpl % self._docinfo

     def append_header(self):
         """append header with .TH and .SH NAME"""
         # NOTE before everything
         # .TH title_upper section date source manual
         if self.header_written:
             return
         self.body.append(self.header())
         self.body.append(MACRO_DEF)
         self.header_written = 1

     def visit_address(self, node):
         self.visit_docinfo_item(node, 'address')

     def depart_address(self, node):
         pass

     def visit_admonition(self, node, name=None):
         if name:
             self.body.append('.IP %s\n' %
                              self.language.labels.get(name, name))

     def depart_admonition(self, node):
         self.body.append('.RE\n')

     def visit_attention(self, node):
         self.visit_admonition(node, 'attention')

     depart_attention = depart_admonition

     def visit_docinfo_item(self, node, name):
         if name == 'author':
             self._docinfo[name].append(node.astext())
         else:
             self._docinfo[name] = node.astext()
         self._docinfo_keys.append(name)
         raise nodes.SkipNode()

     def depart_docinfo_item(self, node):
         pass

     def visit_author(self, node):
         self.visit_docinfo_item(node, 'author')

     depart_author = depart_docinfo_item

     def visit_authors(self, node):
         # _author is called anyway.
         pass

     def depart_authors(self, node):
         pass

     def visit_block_quote(self, node):
         # BUG/HACK: indent always uses the _last_ indentation,
         # thus we need two of them.
         self.indent(BLOCKQOUTE_INDENT)
         self.indent(0)

     def depart_block_quote(self, node):
         self.dedent()
         self.dedent()

     def visit_bullet_list(self, node):
         self.list_start(node)

     def depart_bullet_list(self, node):
         self.list_end()

     def visit_caption(self, node):
         pass

     def depart_caption(self, node):
         pass

     def visit_caution(self, node):
         self.visit_admonition(node, 'caution')

     depart_caution = depart_admonition

     def visit_citation(self, node):
         num, text = node.astext().split(None, 1)
         num = num.strip()
         self.body.append('.IP [%s] 5\n' % num)

     def depart_citation(self, node):
         pass

     def visit_citation_reference(self, node):
         self.body.append('['+node.astext()+']')
         raise nodes.SkipNode()

     def visit_classifier(self, node):
         pass

     def depart_classifier(self, node):
         pass

     def visit_colspec(self, node):
         self.colspecs.append(node)

     def depart_colspec(self, node):
         pass

     def write_colspecs(self):
         self.body.append("%s.\n" % ('L '*len(self.colspecs)))

     def visit_comment(self, node,
                       sub=re.compile('-(?=-)').sub):
         self.body.append(self.comment(node.astext()))
         raise nodes.SkipNode()

     def visit_contact(self, node):
         self.visit_docinfo_item(node, 'contact')

     depart_contact = depart_docinfo_item

     def visit_container(self, node):
         pass

     def depart_container(self, node):
         pass

     def visit_compound(self, node):
         pass

     def depart_compound(self, node):
         pass

     def visit_copyright(self, node):
         self.visit_docinfo_item(node, 'copyright')

     def visit_danger(self, node):
514 def visit_danger(self, node):
515 self.visit_admonition(node, 'danger')
515 self.visit_admonition(node, 'danger')
516
516
517 depart_danger = depart_admonition
517 depart_danger = depart_admonition
518
518
519 def visit_date(self, node):
519 def visit_date(self, node):
520 self.visit_docinfo_item(node, 'date')
520 self.visit_docinfo_item(node, 'date')
521
521
522 def visit_decoration(self, node):
522 def visit_decoration(self, node):
523 pass
523 pass
524
524
525 def depart_decoration(self, node):
525 def depart_decoration(self, node):
526 pass
526 pass
527
527
528 def visit_definition(self, node):
528 def visit_definition(self, node):
529 pass
529 pass
530
530
531 def depart_definition(self, node):
531 def depart_definition(self, node):
532 pass
532 pass
533
533
534 def visit_definition_list(self, node):
534 def visit_definition_list(self, node):
535 self.indent(DEFINITION_LIST_INDENT)
535 self.indent(DEFINITION_LIST_INDENT)
536
536
537 def depart_definition_list(self, node):
537 def depart_definition_list(self, node):
538 self.dedent()
538 self.dedent()
539
539
540 def visit_definition_list_item(self, node):
540 def visit_definition_list_item(self, node):
541 self.body.append(self.defs['definition_list_item'][0])
541 self.body.append(self.defs['definition_list_item'][0])
542
542
543 def depart_definition_list_item(self, node):
543 def depart_definition_list_item(self, node):
544 self.body.append(self.defs['definition_list_item'][1])
544 self.body.append(self.defs['definition_list_item'][1])
545
545
546 def visit_description(self, node):
546 def visit_description(self, node):
547 pass
547 pass
548
548
549 def depart_description(self, node):
549 def depart_description(self, node):
550 pass
550 pass
551
551
552 def visit_docinfo(self, node):
552 def visit_docinfo(self, node):
553 self._in_docinfo = 1
553 self._in_docinfo = 1
554
554
555 def depart_docinfo(self, node):
555 def depart_docinfo(self, node):
556 self._in_docinfo = None
556 self._in_docinfo = None
557 # NOTE nothing should be written before this
557 # NOTE nothing should be written before this
558 self.append_header()
558 self.append_header()
559
559
560 def visit_doctest_block(self, node):
560 def visit_doctest_block(self, node):
561 self.body.append(self.defs['literal_block'][0])
561 self.body.append(self.defs['literal_block'][0])
562 self._in_literal = True
562 self._in_literal = True
563
563
564 def depart_doctest_block(self, node):
564 def depart_doctest_block(self, node):
565 self._in_literal = False
565 self._in_literal = False
566 self.body.append(self.defs['literal_block'][1])
566 self.body.append(self.defs['literal_block'][1])
567
567
568 def visit_document(self, node):
568 def visit_document(self, node):
569 # no blank line between comment and header.
569 # no blank line between comment and header.
570 self.body.append(self.comment(self.document_start).rstrip()+'\n')
570 self.body.append(self.comment(self.document_start).rstrip()+'\n')
571 # writing header is postponed
572 self.header_written = 0
573
574 def depart_document(self, node):
575 if self._docinfo['author']:
576 self.body.append('.SH AUTHOR\n%s\n'
577 % ', '.join(self._docinfo['author']))
578 skip = ('author', 'copyright', 'date',
579 'manual_group', 'manual_section',
580 'subtitle',
581 'title', 'title_upper', 'version')
582 for name in self._docinfo_keys:
583 if name == 'address':
584 self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
585 self.language.labels.get(name, name),
586 self.defs['indent'][0] % 0,
587 self.defs['indent'][0] % BLOCKQOUTE_INDENT,
588 self._docinfo[name],
589 self.defs['indent'][1],
590 self.defs['indent'][1]))
591 elif name not in skip:
592 if name in self._docinfo_names:
593 label = self._docinfo_names[name]
594 else:
595 label = self.language.labels.get(name, name)
596 self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
597 if self._docinfo['copyright']:
598 self.body.append('.SH COPYRIGHT\n%s\n'
599 % self._docinfo['copyright'])
600 self.body.append(self.comment(
601 'Generated by docutils manpage writer.\n'))
602
603 def visit_emphasis(self, node):
604 self.body.append(self.defs['emphasis'][0])
605
606 def depart_emphasis(self, node):
607 self.body.append(self.defs['emphasis'][1])
608
609 def visit_entry(self, node):
610 # a cell in a table row
611 if 'morerows' in node:
612 self.document.reporter.warning('"table row spanning" not supported',
613 base_node=node)
614 if 'morecols' in node:
615 self.document.reporter.warning(
616 '"table cell spanning" not supported', base_node=node)
617 self.context.append(len(self.body))
618
619 def depart_entry(self, node):
620 start = self.context.pop()
621 self._active_table.append_cell(self.body[start:])
622 del self.body[start:]
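visit_entry/depart_entry capture a table cell by remembering where it starts in the output list, then cutting everything after that mark back out. The same trick reduced to a few self-contained lines (names are illustrative, not the writer's API):

```python
# Minimal sketch of the capture-by-index pattern used above.
body, context, cells = [], [], []

def visit_entry():
    context.append(len(body))      # mark where this cell's output begins

def depart_entry():
    start = context.pop()
    cells.append(body[start:])     # hand the captured fragments to the table
    del body[start:]               # and remove them from the main output

visit_entry()
body.extend(['some ', 'cell ', 'text'])
depart_entry()
print(cells)   # [['some ', 'cell ', 'text']]
print(body)    # []
```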
623
624 def visit_enumerated_list(self, node):
625 self.list_start(node)
626
627 def depart_enumerated_list(self, node):
628 self.list_end()
629
630 def visit_error(self, node):
631 self.visit_admonition(node, 'error')
632
633 depart_error = depart_admonition
634
635 def visit_field(self, node):
636 pass
637
638 def depart_field(self, node):
639 pass
640
641 def visit_field_body(self, node):
642 if self._in_docinfo:
643 name_normalized = self._field_name.lower().replace(" ","_")
644 self._docinfo_names[name_normalized] = self._field_name
645 self.visit_docinfo_item(node, name_normalized)
646 raise nodes.SkipNode()
647
648 def depart_field_body(self, node):
649 pass
650
651 def visit_field_list(self, node):
652 self.indent(FIELD_LIST_INDENT)
653
654 def depart_field_list(self, node):
655 self.dedent()
656
657 def visit_field_name(self, node):
658 if self._in_docinfo:
659 self._field_name = node.astext()
660 raise nodes.SkipNode()
661 else:
662 self.body.append(self.defs['field_name'][0])
663
664 def depart_field_name(self, node):
665 self.body.append(self.defs['field_name'][1])
666
667 def visit_figure(self, node):
668 self.indent(2.5)
669 self.indent(0)
670
671 def depart_figure(self, node):
672 self.dedent()
673 self.dedent()
674
675 def visit_footer(self, node):
676 self.document.reporter.warning('"footer" not supported',
677 base_node=node)
678
679 def depart_footer(self, node):
680 pass
681
682 def visit_footnote(self, node):
683 num, text = node.astext().split(None, 1)
684 num = num.strip()
685 self.body.append('.IP [%s] 5\n' % self.deunicode(num))
686
687 def depart_footnote(self, node):
688 pass
689
690 def footnote_backrefs(self, node):
691 self.document.reporter.warning('"footnote_backrefs" not supported',
692 base_node=node)
693
694 def visit_footnote_reference(self, node):
695 self.body.append('['+self.deunicode(node.astext())+']')
696 raise nodes.SkipNode()
697
698 def depart_footnote_reference(self, node):
699 pass
700
701 def visit_generated(self, node):
702 pass
703
704 def depart_generated(self, node):
705 pass
706
707 def visit_header(self, node):
708 raise NotImplementedError(node.astext())
709
710 def depart_header(self, node):
711 pass
712
713 def visit_hint(self, node):
714 self.visit_admonition(node, 'hint')
715
716 depart_hint = depart_admonition
717
718 def visit_subscript(self, node):
719 self.body.append('\\s-2\\d')
720
721 def depart_subscript(self, node):
722 self.body.append('\\u\\s0')
723
724 def visit_superscript(self, node):
725 self.body.append('\\s-2\\u')
726
727 def depart_superscript(self, node):
728 self.body.append('\\d\\s0')
729
730 def visit_attribution(self, node):
731 self.body.append('\\(em ')
732
733 def depart_attribution(self, node):
734 self.body.append('\n')
735
736 def visit_image(self, node):
737 self.document.reporter.warning('"image" not supported',
738 base_node=node)
739 text = []
740 if 'alt' in node.attributes:
741 text.append(node.attributes['alt'])
742 if 'uri' in node.attributes:
743 text.append(node.attributes['uri'])
744 self.body.append('[image: %s]\n' % ('/'.join(text)))
745 raise nodes.SkipNode()
746
747 def visit_important(self, node):
748 self.visit_admonition(node, 'important')
749
750 depart_important = depart_admonition
751
752 def visit_label(self, node):
753 # footnote and citation
754 if (isinstance(node.parent, nodes.footnote)
755 or isinstance(node.parent, nodes.citation)):
756 raise nodes.SkipNode()
757 self.document.reporter.warning('"unsupported "label"',
758 base_node=node)
759 self.body.append('[')
760
761 def depart_label(self, node):
762 self.body.append(']\n')
763
764 def visit_legend(self, node):
765 pass
766
767 def depart_legend(self, node):
768 pass
769
770 # WHAT should we use .INDENT, .UNINDENT ?
771 def visit_line_block(self, node):
772 self._line_block += 1
773 if self._line_block == 1:
774 self.body.append('.sp\n')
775 self.body.append('.nf\n')
776 else:
777 self.body.append('.in +2\n')
778
779 def depart_line_block(self, node):
780 self._line_block -= 1
781 if self._line_block == 0:
782 self.body.append('.fi\n')
783 self.body.append('.sp\n')
784 else:
785 self.body.append('.in -2\n')
786
787 def visit_line(self, node):
788 pass
789
790 def depart_line(self, node):
791 self.body.append('\n')
792
793 def visit_list_item(self, node):
794 # man 7 man argues to use ".IP" instead of ".TP"
795 self.body.append('.IP %s %d\n' % (
796 - self._list_char[-1].next(),
796 + next(self._list_char[-1]),
797 self._list_char[-1].get_width(),))
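The -/+ pair above is the actual change in this file: Python 2 iterators expose a .next() method, Python 3 renames it to __next__(), and only the next() builtin (available since Python 2.6) spells the call identically on both. A minimal sketch, where ListChar is a hypothetical stand-in for the enumerator objects kept in _list_char, not the writer's real class:

```python
# Sketch only: a py2/py3-compatible iterator for list enumerators.
class ListChar(object):
    def __init__(self):
        self._count = 0
    def __iter__(self):
        return self
    def __next__(self):       # iterator protocol name on Python 3
        self._count += 1
        return '%d.' % self._count
    next = __next__           # the same method under its Python 2 name

lc = ListChar()
print(next(lc))   # '1.' -- the builtin works on Python 2.6+ and Python 3
print(next(lc))   # '2.'
# lc.next() relies on the Python 2 spelling; next(lc) needs no alias on
# Python 3, which is why the changeset standardizes on the builtin.
```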
798
799 def depart_list_item(self, node):
800 pass
801
802 def visit_literal(self, node):
803 self.body.append(self.defs['literal'][0])
804
805 def depart_literal(self, node):
806 self.body.append(self.defs['literal'][1])
807
808 def visit_literal_block(self, node):
809 self.body.append(self.defs['literal_block'][0])
810 self._in_literal = True
811
812 def depart_literal_block(self, node):
813 self._in_literal = False
814 self.body.append(self.defs['literal_block'][1])
815
816 def visit_meta(self, node):
817 raise NotImplementedError(node.astext())
818
819 def depart_meta(self, node):
820 pass
821
822 def visit_note(self, node):
823 self.visit_admonition(node, 'note')
824
825 depart_note = depart_admonition
826
827 def indent(self, by=0.5):
828 # if we are in a section ".SH" there already is a .RS
829 step = self._indent[-1]
830 self._indent.append(by)
831 self.body.append(self.defs['indent'][0] % step)
832
833 def dedent(self):
834 self._indent.pop()
835 self.body.append(self.defs['indent'][1])
836
837 def visit_option_list(self, node):
838 self.indent(OPTION_LIST_INDENT)
839
840 def depart_option_list(self, node):
841 self.dedent()
842
843 def visit_option_list_item(self, node):
844 # one item of the list
845 self.body.append(self.defs['option_list_item'][0])
846
847 def depart_option_list_item(self, node):
848 self.body.append(self.defs['option_list_item'][1])
849
850 def visit_option_group(self, node):
851 # as one option could have several forms it is a group
852 # options without parameter bold only, .B, -v
853 # options with parameter bold italic, .BI, -f file
854 #
855 # we do not know if .B or .BI
856 self.context.append('.B') # blind guess
857 self.context.append(len(self.body)) # to be able to insert later
858 self.context.append(0) # option counter
859
860 def depart_option_group(self, node):
861 self.context.pop() # the counter
862 start_position = self.context.pop()
863 text = self.body[start_position:]
864 del self.body[start_position:]
865 self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
866
867 def visit_option(self, node):
868 # each form of the option will be presented separately
869 if self.context[-1] > 0:
870 self.body.append(', ')
871 if self.context[-3] == '.BI':
872 self.body.append('\\')
873 self.body.append(' ')
874
875 def depart_option(self, node):
876 self.context[-1] += 1
877
878 def visit_option_string(self, node):
879 # do not know if .B or .BI
880 pass
881
882 def depart_option_string(self, node):
883 pass
884
885 def visit_option_argument(self, node):
886 self.context[-3] = '.BI' # bold/italic alternate
887 if node['delimiter'] != ' ':
888 self.body.append('\\fB%s ' % node['delimiter'])
889 elif self.body[len(self.body) - 1].endswith('='):
890 # a blank only means no blank in output, just changing font
891 self.body.append(' ')
892 else:
893 # blank backslash blank, switch font then a blank
894 self.body.append(' \\ ')
895
896 def depart_option_argument(self, node):
897 pass
898
899 def visit_organization(self, node):
900 self.visit_docinfo_item(node, 'organization')
901
902 def depart_organization(self, node):
903 pass
904
905 def visit_paragraph(self, node):
906 # ``.PP`` : Start standard indented paragraph.
907 # ``.LP`` : Start block paragraph, all except the first.
908 # ``.P [type]`` : Start paragraph type.
909 # NOTE don't use paragraph starts because they reset indentation.
910 # ``.sp`` is only vertical space
911 self.ensure_eol()
912 self.body.append('.sp\n')
913
914 def depart_paragraph(self, node):
915 self.body.append('\n')
916
917 def visit_problematic(self, node):
918 self.body.append(self.defs['problematic'][0])
919
920 def depart_problematic(self, node):
921 self.body.append(self.defs['problematic'][1])
922
923 def visit_raw(self, node):
924 if node.get('format') == 'manpage':
925 self.body.append(node.astext() + "\n")
926 # Keep non-manpage raw text out of output:
927 raise nodes.SkipNode()
928
929 def visit_reference(self, node):
930 """E.g. link or email address."""
931 self.body.append(self.defs['reference'][0])
932
933 def depart_reference(self, node):
934 self.body.append(self.defs['reference'][1])
935
936 def visit_revision(self, node):
937 self.visit_docinfo_item(node, 'revision')
938
939 depart_revision = depart_docinfo_item
940
941 def visit_row(self, node):
942 self._active_table.new_row()
943
944 def depart_row(self, node):
945 pass
946
947 def visit_section(self, node):
948 self.section_level += 1
949
950 def depart_section(self, node):
951 self.section_level -= 1
952
953 def visit_status(self, node):
954 self.visit_docinfo_item(node, 'status')
955
956 depart_status = depart_docinfo_item
957
958 def visit_strong(self, node):
959 self.body.append(self.defs['strong'][0])
960
961 def depart_strong(self, node):
962 self.body.append(self.defs['strong'][1])
963
964 def visit_substitution_definition(self, node):
965 """Internal only."""
966 raise nodes.SkipNode()
967
968 def visit_substitution_reference(self, node):
969 self.document.reporter.warning('"substitution_reference" not supported',
970 base_node=node)
971
972 def visit_subtitle(self, node):
973 if isinstance(node.parent, nodes.sidebar):
974 self.body.append(self.defs['strong'][0])
975 elif isinstance(node.parent, nodes.document):
976 self.visit_docinfo_item(node, 'subtitle')
977 elif isinstance(node.parent, nodes.section):
978 self.body.append(self.defs['strong'][0])
979
980 def depart_subtitle(self, node):
981 # document subtitle calls SkipNode
982 self.body.append(self.defs['strong'][1]+'\n.PP\n')
983
984 def visit_system_message(self, node):
985 # TODO add report_level
986 #if node['level'] < self.document.reporter['writer'].report_level:
987 # Level is too low to display:
988 # raise nodes.SkipNode
989 attr = {}
990 if node.hasattr('id'):
991 attr['name'] = node['id']
992 if node.hasattr('line'):
993 line = ', line %s' % node['line']
994 else:
995 line = ''
996 self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
997 % (node['type'], node['level'], node['source'], line))
998
999 def depart_system_message(self, node):
1000 pass
1001
1002 def visit_table(self, node):
1003 self._active_table = Table()
1004
1005 def depart_table(self, node):
1006 self.ensure_eol()
1007 self.body.extend(self._active_table.as_list())
1008 self._active_table = None
1009
1010 def visit_target(self, node):
1011 # targets are in-document hyper targets, without any use for man-pages.
1012 raise nodes.SkipNode()
1013
1014 def visit_tbody(self, node):
1015 pass
1016
1017 def depart_tbody(self, node):
1018 pass
1019
1020 def visit_term(self, node):
1021 self.body.append(self.defs['term'][0])
1022
1023 def depart_term(self, node):
1024 self.body.append(self.defs['term'][1])
1025
1026 def visit_tgroup(self, node):
1027 pass
1028
1029 def depart_tgroup(self, node):
1030 pass
1031
1032 def visit_thead(self, node):
1033 # MAYBE double line '='
1034 pass
1035
1036 def depart_thead(self, node):
1037 # MAYBE double line '='
1038 pass
1039
1040 def visit_tip(self, node):
1041 self.visit_admonition(node, 'tip')
1042
1043 depart_tip = depart_admonition
1044
1045 def visit_title(self, node):
1046 if isinstance(node.parent, nodes.topic):
1047 self.body.append(self.defs['topic-title'][0])
1048 elif isinstance(node.parent, nodes.sidebar):
1049 self.body.append(self.defs['sidebar-title'][0])
1050 elif isinstance(node.parent, nodes.admonition):
1051 self.body.append('.IP "')
1052 elif self.section_level == 0:
1053 self._docinfo['title'] = node.astext()
1054 # document title for .TH
1055 self._docinfo['title_upper'] = node.astext().upper()
1056 raise nodes.SkipNode()
1057 elif self.section_level == 1:
1058 self.body.append('.SH ')
1059 for n in node.traverse(nodes.Text):
1060 n.parent.replace(n, nodes.Text(n.astext().upper()))
1061 else:
1062 self.body.append('.SS ')
1063
1064 def depart_title(self, node):
1065 if isinstance(node.parent, nodes.admonition):
1066 self.body.append('"')
1067 self.body.append('\n')
1068
1069 def visit_title_reference(self, node):
1070 """inline citation reference"""
1071 self.body.append(self.defs['title_reference'][0])
1072
1073 def depart_title_reference(self, node):
1074 self.body.append(self.defs['title_reference'][1])
1075
1076 def visit_topic(self, node):
1077 pass
1078
1079 def depart_topic(self, node):
1080 pass
1081
1082 def visit_sidebar(self, node):
1083 pass
1084
1085 def depart_sidebar(self, node):
1086 pass
1087
1088 def visit_rubric(self, node):
1089 pass
1090
1091 def depart_rubric(self, node):
1092 pass
1093
1094 def visit_transition(self, node):
1095 # .PP Begin a new paragraph and reset prevailing indent.
1096 # .sp N leaves N lines of blank space.
1097 # .ce centers the next line
1098 self.body.append('\n.sp\n.ce\n----\n')
1099
1100 def depart_transition(self, node):
1101 self.body.append('\n.ce 0\n.sp\n')
1102
1103 def visit_version(self, node):
1104 self.visit_docinfo_item(node, 'version')
1105
1106 def visit_warning(self, node):
1107 self.visit_admonition(node, 'warning')
1108
1109 depart_warning = depart_admonition
1110
1111 def unimplemented_visit(self, node):
1112 raise NotImplementedError('visiting unimplemented node type: %s'
1113 % node.__class__.__name__)
1114
1115 # vim: set fileencoding=utf-8 et ts=4 ai :
@@ -1,75 +1,75 b''
1 # highlight.py - highlight extension implementation file
2 #
3 # Copyright 2007-2009 Adam Hupp <adam@hupp.org> and others
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7 #
8 # The original module was split in an interface and an implementation
9 # file to defer pygments loading and speedup extension setup.
10
11 from mercurial import demandimport
12 demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__'])
13 from mercurial import util, encoding
14
15 from pygments import highlight
16 from pygments.util import ClassNotFound
17 from pygments.lexers import guess_lexer, guess_lexer_for_filename, TextLexer
18 from pygments.formatters import HtmlFormatter
19
20 SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" '
21 'type="text/css" />')
22
23 def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):
24
25 # append a <link ...> to the syntax highlighting css
26 old_header = tmpl.load('header')
27 if SYNTAX_CSS not in old_header:
28 new_header = old_header + SYNTAX_CSS
29 tmpl.cache['header'] = new_header
30
31 text = fctx.data()
32 if util.binary(text):
33 return
34
35 # str.splitlines() != unicode.splitlines() because "reasons"
36 for c in "\x0c\x1c\x1d\x1e":
37 if c in text:
38 text = text.replace(c, '')
39
40 # Pygments is best used with Unicode strings:
41 # <http://pygments.org/docs/unicode/>
42 text = text.decode(encoding.encoding, 'replace')
43
44 # To get multi-line strings right, we can't format line-by-line
45 try:
46 lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
47 stripnl=False)
48 except (ClassNotFound, ValueError):
49 # guess_lexer will return a lexer if *any* lexer matches. There is
50 # no way to specify a minimum match score. This can give a high rate of
51 # false positives on files with an unknown filename pattern.
52 if guessfilenameonly:
53 return
54
55 try:
56 lexer = guess_lexer(text[:1024], stripnl=False)
57 except (ClassNotFound, ValueError):
58 # Don't highlight unknown files
59 return
60
61 # Don't highlight text files
62 if isinstance(lexer, TextLexer):
63 return
64
65 formatter = HtmlFormatter(nowrap=True, style=style)
66
67 colorized = highlight(text, lexer, formatter)
68 coloriter = (s.encode(encoding.encoding, 'replace')
69 for s in colorized.splitlines())
70
71 - tmpl.filters['colorize'] = lambda x: coloriter.next()
71 + tmpl.filters['colorize'] = lambda x: next(coloriter)
72
73 oldl = tmpl.cache[field]
74 newl = oldl.replace('line|escape', 'line|colorize')
75 tmpl.cache[field] = newl
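The -/+ pair on line 71 is the only change in this hunk. Because coloriter is a generator, the next() builtin is the portable way to advance it each time the template applies the filter. A self-contained sketch with hypothetical highlighted output (no Mercurial or pygments required):

```python
# Sketch only: two fake "highlighted" lines stand in for pygments output.
colorized = '<span>line one</span>\n<span>line two</span>\n'
coloriter = (s for s in colorized.splitlines())

# The filter ignores its argument and just yields the next highlighted
# line; the template calls it once per raw source line, in order.
colorize = lambda x: next(coloriter)

print(colorize('line one'))   # '<span>line one</span>'
print(colorize('line two'))   # '<span>line two</span>'
```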
@@ -1,361 +1,361 b''
1 # ancestor.py - generic DAG ancestor algorithm for mercurial
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import collections
11 import heapq
12
13 from .node import nullrev
14
15 def commonancestorsheads(pfunc, *nodes):
16 """Returns a set with the heads of all common ancestors of all nodes,
17 heads(::nodes[0] and ::nodes[1] and ...) .
18
19 pfunc must return a list of parent vertices for a given vertex.
20 """
21 if not isinstance(nodes, set):
22 nodes = set(nodes)
23 if nullrev in nodes:
24 return set()
25 if len(nodes) <= 1:
26 return nodes
27
28 allseen = (1 << len(nodes)) - 1
29 seen = [0] * (max(nodes) + 1)
30 for i, n in enumerate(nodes):
31 seen[n] = 1 << i
32 poison = 1 << (i + 1)
33
34 gca = set()
35 interesting = len(nodes)
36 nv = len(seen) - 1
37 while nv >= 0 and interesting:
38 v = nv
39 nv -= 1
40 if not seen[v]:
41 continue
42 sv = seen[v]
43 if sv < poison:
44 interesting -= 1
45 if sv == allseen:
46 gca.add(v)
47 sv |= poison
48 if v in nodes:
49 # history is linear
50 return set([v])
51 if sv < poison:
52 for p in pfunc(v):
53 sp = seen[p]
54 if p == nullrev:
55 continue
56 if sp == 0:
57 seen[p] = sv
58 interesting += 1
59 elif sp != sv:
60 seen[p] |= sv
61 else:
62 for p in pfunc(v):
63 if p == nullrev:
64 continue
65 sp = seen[p]
66 if sp and sp < poison:
67 interesting -= 1
68 seen[p] = sv
69 return gca
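To see the bit-mask walk in action, here is a toy DAG (hypothetical revision numbers, numbered so parents are always smaller than children, with -1 playing nullrev; pfunc maps a revision to its parent list, as the docstring requires):

```python
# Toy DAG, not from the source tree:
#
#   0 -- 1 -- 2 -- 4
#         \
#          3 ------- 5
#
parents = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2], 5: [3]}
pfunc = parents.__getitem__

print(commonancestorsheads(pfunc, 4, 5))  # {1}: the branch point
print(commonancestorsheads(pfunc, 2, 4))  # {2}: 2 is itself an ancestor of 4,
                                          # so the linear-history short-circuit fires
```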
70
71 def ancestors(pfunc, *orignodes):
72 """
73 Returns the common ancestors of a and b that are furthest from a
74 root (as measured by longest path).
75
76 pfunc must return a list of parent vertices for a given vertex.
77 """
78 def deepest(nodes):
79 interesting = {}
80 count = max(nodes) + 1
81 depth = [0] * count
82 seen = [0] * count
83 mapping = []
84 for (i, n) in enumerate(sorted(nodes)):
85 depth[n] = 1
86 b = 1 << i
87 seen[n] = b
88 interesting[b] = 1
89 mapping.append((b, n))
90 nv = count - 1
91 while nv >= 0 and len(interesting) > 1:
92 v = nv
93 nv -= 1
94 dv = depth[v]
95 if dv == 0:
96 continue
97 sv = seen[v]
98 for p in pfunc(v):
99 if p == nullrev:
100 continue
101 dp = depth[p]
102 nsp = sp = seen[p]
103 if dp <= dv:
104 depth[p] = dv + 1
105 if sp != sv:
106 interesting[sv] += 1
107 nsp = seen[p] = sv
108 if sp:
109 interesting[sp] -= 1
110 if interesting[sp] == 0:
111 del interesting[sp]
112 elif dv == dp - 1:
113 nsp = sp | sv
114 if nsp == sp:
115 continue
116 seen[p] = nsp
117 interesting.setdefault(nsp, 0)
118 interesting[nsp] += 1
119 interesting[sp] -= 1
120 if interesting[sp] == 0:
121 del interesting[sp]
122 interesting[sv] -= 1
123 if interesting[sv] == 0:
124 del interesting[sv]
125
126 if len(interesting) != 1:
127 return []
128
129 k = 0
130 for i in interesting:
131 k |= i
132 return set(n for (i, n) in mapping if k & i)
133
134 gca = commonancestorsheads(pfunc, *orignodes)
135
136 if len(gca) <= 1:
137 return gca
138 return deepest(gca)
139
140 class incrementalmissingancestors(object):
141 '''persistent state used to calculate missing ancestors incrementally
142
143 Although similar in spirit to lazyancestors below, this is a separate class
144 because trying to support contains and missingancestors operations with the
145 same internal data structures adds needless complexity.'''
146 def __init__(self, pfunc, bases):
147 self.bases = set(bases)
148 if not self.bases:
149 self.bases.add(nullrev)
150 self.pfunc = pfunc
151
152 def hasbases(self):
153 '''whether the common set has any non-trivial bases'''
154 return self.bases and self.bases != set([nullrev])
155
156 def addbases(self, newbases):
157 '''grow the ancestor set by adding new bases'''
158 self.bases.update(newbases)
159
160 def removeancestorsfrom(self, revs):
161 '''remove all ancestors of bases from the set revs (in place)'''
162 bases = self.bases
163 pfunc = self.pfunc
164 revs.difference_update(bases)
165 # nullrev is always an ancestor
166 revs.discard(nullrev)
167 if not revs:
168 return
169 # anything in revs > start is definitely not an ancestor of bases
170 # revs <= start needs to be investigated
171 start = max(bases)
172 keepcount = sum(1 for r in revs if r > start)
173 if len(revs) == keepcount:
174 # no revs to consider
175 return
176
177 for curr in xrange(start, min(revs) - 1, -1):
178 if curr not in bases:
179 continue
180 revs.discard(curr)
181 bases.update(pfunc(curr))
182 if len(revs) == keepcount:
183 # no more potential revs to discard
184 break
185
186 def missingancestors(self, revs):
187 '''return all the ancestors of revs that are not ancestors of self.bases
188
189 This may include elements from revs.
190
191 Equivalent to the revset (::revs - ::self.bases). Revs are returned in
192 revision number order, which is a topological order.'''
193 revsvisit = set(revs)
194 basesvisit = self.bases
195 pfunc = self.pfunc
196 bothvisit = revsvisit.intersection(basesvisit)
197 revsvisit.difference_update(bothvisit)
198 if not revsvisit:
199 return []
200
201 start = max(max(revsvisit), max(basesvisit))
202 # At this point, we hold the invariants that:
203 # - revsvisit is the set of nodes we know are an ancestor of at least
204 # one of the nodes in revs
205 # - basesvisit is the same for bases
206 # - bothvisit is the set of nodes we know are ancestors of at least one
207 # of the nodes in revs and one of the nodes in bases. bothvisit and
208 # revsvisit are mutually exclusive, but bothvisit is a subset of
209 # basesvisit.
210 # Now we walk down in reverse topo order, adding parents of nodes
211 # already visited to the sets while maintaining the invariants. When a
212 # node is found in both revsvisit and basesvisit, it is removed from
213 # revsvisit and added to bothvisit. When revsvisit becomes empty, there
214 # are no more ancestors of revs that aren't also ancestors of bases, so
215 # exit.
216
217 missing = []
218 for curr in xrange(start, nullrev, -1):
219 if not revsvisit:
220 break
221
222 if curr in bothvisit:
223 bothvisit.remove(curr)
224 # curr's parents might have made it into revsvisit through
225 # another path
226 for p in pfunc(curr):
227 revsvisit.discard(p)
228 basesvisit.add(p)
229 bothvisit.add(p)
230 continue
231
232 if curr in revsvisit:
233 missing.append(curr)
234 revsvisit.remove(curr)
235 thisvisit = revsvisit
236 othervisit = basesvisit
237 elif curr in basesvisit:
238 thisvisit = basesvisit
239 othervisit = revsvisit
240 else:
241 # not an ancestor of revs or bases: ignore
242 continue
243
244 for p in pfunc(curr):
245 if p == nullrev:
246 pass
247 elif p in othervisit or p in bothvisit:
248 # p is implicitly in thisvisit. This means p is or should be
249 # in bothvisit
250 revsvisit.discard(p)
251 basesvisit.add(p)
252 bothvisit.add(p)
253 else:
254 # visit later
255 thisvisit.add(p)
256
257 missing.reverse()
258 return missing
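A usage sketch against the same toy DAG as above (hypothetical revision numbers): the ancestors of rev 5 that are not ancestors of the base 2, i.e. the revset ::5 - ::2.

```python
# Toy DAG from the earlier example; pfunc is again a dict lookup.
parents = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2], 5: [3]}
missing = incrementalmissingancestors(parents.__getitem__, [2])

# Revs 1 and 0 are ancestors of base 2, so only 3 and 5 are missing,
# returned in ascending (topological) order:
print(missing.missingancestors([5]))   # [3, 5]
```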
259
260 class lazyancestors(object):
261 def __init__(self, pfunc, revs, stoprev=0, inclusive=False):
262 """Create a new object generating ancestors for the given revs. Does
263 not generate revs lower than stoprev.
264
265 This is computed lazily starting from revs. The object supports
266 iteration and membership.
267
268 cl should be a changelog and revs should be an iterable. inclusive is
269 a boolean that indicates whether revs should be included. Revs lower
270 than stoprev will not be generated.
271
272 Result does not include the null revision."""
273 self._parentrevs = pfunc
274 self._initrevs = revs
275 self._stoprev = stoprev
276 self._inclusive = inclusive
277
278 # Initialize data structures for __contains__.
279 # For __contains__, we use a heap rather than a deque because
280 # (a) it minimizes the number of parentrevs calls made
281 # (b) it makes the loop termination condition obvious
282 # Python's heap is a min-heap. Multiply all values by -1 to convert it
283 # into a max-heap.
284 self._containsvisit = [-rev for rev in revs]
285 heapq.heapify(self._containsvisit)
286 if inclusive:
287 self._containsseen = set(revs)
288 else:
289 self._containsseen = set()
290
    def __nonzero__(self):
        """False if the set is empty, True otherwise."""
        try:
-            iter(self).next()
+            next(iter(self))
            return True
        except StopIteration:
            return False
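
    # The one-line change above is the point of this changeset: iterator
    # objects in Python 2 expose a .next() method, which Python 3 renames
    # to __next__(). The next() builtin, available since Python 2.6,
    # hides the difference, so the rewritten line works on both:
    #
    #     it = iter([1, 2, 3])
    #     next(it)              # -> 1 on Python 2.6+ and on Python 3
    #     it.next()             # -> 1 on Python 2, AttributeError on 3
    #     next(iter([]), None)  # the builtin also accepts a default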

    def __iter__(self):
        """Generate the ancestors of _initrevs in reverse topological order.

        If inclusive is False, yield a sequence of revision numbers starting
        with the parents of each revision in revs, i.e., each revision is *not*
        considered an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc.

        If inclusive is True, yield all the revs first (ignoring stoprev),
        then yield all the ancestors of revs as when inclusive is False.
        If an element in revs is an ancestor of a different rev it is not
        yielded again."""
        seen = set()
        revs = self._initrevs
        if self._inclusive:
            for rev in revs:
                yield rev
            seen.update(revs)

        parentrevs = self._parentrevs
        stoprev = self._stoprev
        visit = collections.deque(revs)

        see = seen.add
        schedule = visit.append

        while visit:
            for parent in parentrevs(visit.popleft()):
                if parent >= stoprev and parent not in seen:
                    schedule(parent)
                    see(parent)
                    yield parent

    def __contains__(self, target):
        """Test whether target is an ancestor of self._initrevs."""
        # Trying to do both __iter__ and __contains__ using the same visit
        # heap and seen set is complex enough that it slows down both. Keep
        # them separate.
        seen = self._containsseen
        if target in seen:
            return True

        parentrevs = self._parentrevs
        visit = self._containsvisit
        stoprev = self._stoprev
        heappop = heapq.heappop
        heappush = heapq.heappush
        see = seen.add

        targetseen = False

        while visit and -visit[0] > target and not targetseen:
            for parent in parentrevs(-heappop(visit)):
                if parent < stoprev or parent in seen:
                    continue
                # We need to make sure we push all parents into the heap so
                # that we leave it in a consistent state for future calls.
                heappush(visit, -parent)
                see(parent)
                if parent == target:
                    targetseen = True

        return targetseen
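
# A usage sketch of lazyancestors with an invented parent function (the
# DAG and revision numbers are illustrative only; pfunc returns a list
# of parent revisions):
#
#     parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2]}
#     la = lazyancestors(parents.__getitem__, [4, 3])
#     list(la)   # [2, 1, 0] -- breadth-first, initial revs excluded
#     1 in la    # True, answered by the shared max-heap walk
#     3 in la    # False: 3 is an initial rev, not an ancestor of one
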
@@ -1,3556 +1,3556 b''
# cmdutil.py - help for command processing in mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import sys
import tempfile

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)

from . import (
    bookmarks,
    changelog,
    copies,
    crecord as crecordmod,
    encoding,
    error,
    formatter,
    graphmod,
    lock as lockmod,
    match as matchmod,
    obsolete,
    patch,
    pathutil,
    phases,
    repair,
    revlog,
    revset,
    scmutil,
    templatekw,
    templater,
    util,
)
stringio = util.stringio

def ishunk(x):
    hunkclasses = (crecordmod.uihunk, patch.recordhunk)
    return isinstance(x, hunkclasses)

def newandmodified(chunks, originalchunks):
    newlyaddedandmodifiedfiles = set()
    for chunk in chunks:
        if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
            originalchunks:
            newlyaddedandmodifiedfiles.add(chunk.header.filename())
    return newlyaddedandmodifiedfiles

def parsealiases(cmd):
    return cmd.lstrip("^").split("|")
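
# For example, with the "^cmd|alias" syntax used by command tables,
# where a leading "^" marks a command for the short help list:
#
#     parsealiases("^log|history")  # -> ['log', 'history']
#     parsealiases("status")        # -> ['status']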

def setupwrapcolorwrite(ui):
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrap)
    return oldwrite

def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    if usecurses:
        if testfile:
            recordfn = crecordmod.testdecorator(testfile,
                                                crecordmod.testchunkselector)
        else:
            recordfn = crecordmod.chunkselector

        return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)

    else:
        return patch.filterpatch(ui, originalhunks, operation)

def recordfilter(ui, originalhunks, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used for ui purposes to indicate to the user what kind
    of filtering they are doing: reverting, committing, shelving, etc.
    *operation* has to be a translated string.
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest', None)
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, operation)
    finally:
        ui.write = oldwrite
    return newchunks, newopts

def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    from . import merge as mergemod
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is the generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare the working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by the non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply a subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"})
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open into thinking that we haven't
                    # modified them.
                    #
                    # Also note that this is racy, as an editor could notice
                    # the file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)

def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for e in keys:
        aliases = parsealiases(e)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            for a in aliases:
                if a.startswith(cmd):
                    found = a
                    break
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[e])
            else:
                choice[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
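
# A quick illustration of the prefix matching above, using an invented
# two-entry table whose values stand in for real command table entries:
#
#     table = {'^log|history': 'logentry', 'debugdata': 'dbgentry'}
#     choice, allcmds = findpossible('hist', table)
#     # allcmds lists 'log', 'history' and 'debugdata'
#     # choice == {'history': (['log', 'history'], 'logentry')}
#     # debug commands are offered only when nothing else matches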

def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd, allcmds)

def findrepo(p):
    while not os.path.isdir(os.path.join(p, ".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            return None

    return p

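# findrepo walks upward until it finds a directory containing ".hg",
# e.g. (paths invented for illustration):
#
#     findrepo('/work/proj/src/inner')  # -> '/work/proj' if
#                                       #    /work/proj/.hg exists
#     findrepo('/tmp')                  # -> None if no ancestor has .hg
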
def bailifchanged(repo, merge=True):
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise error.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged()

def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, inst.strerror))
    return message

def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    a merge is being committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        if ctxorbool:
            return baseformname + ".merge"
    elif 1 < len(ctxorbool.parents()):
        return baseformname + ".merge"

    return baseformname + ".normal"
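
# For instance (the base form name is illustrative; these two calls
# exercise only the bool branch):
#
#     mergeeditform(True, 'commit.amend')   # -> 'commit.amend.merge'
#     mergeeditform(False, 'commit.amend')  # -> 'commit.amend.normal'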

def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with the edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking for emptiness. It should return the actual text to be
    stored into history. This allows the description to be changed before
    it is stored.

    'extramsg' is an extra message to be shown in the editor instead of the
    'Leave message empty to abort commit' line. The 'HG: ' prefix and EOL
    are automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific to usage in MQ.
    """
    if edit or finishdesc or extramsg:
        return lambda r, c, s: commitforceeditor(r, c, s,
                                                 finishdesc=finishdesc,
                                                 extramsg=extramsg,
                                                 editform=editform)
    elif editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    else:
        return commiteditor

def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if limit:
        try:
            limit = int(limit)
        except ValueError:
            raise error.Abort(_('limit must be a positive integer'))
        if limit <= 0:
            raise error.Abort(_('limit must be positive'))
    else:
        limit = None
    return limit
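
# Behaviour sketch (invented values):
#
#     loglimit({'limit': '10'})  # -> 10
#     loglimit({'limit': '0'})   # -> error.Abort: limit must be positive
#     loglimit({'limit': 'xx'})  # -> error.Abort: limit must be a
#                                #    positive integer
#     loglimit({})               # -> None, i.e. no limit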

def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', str(desc))
    }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])

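# The format specs above ('%H', '%R', '%h', '%m', '%b', '%r', '%N',
# '%n', '%s', '%d', '%p') are consumed by the while-loop expander. A
# standalone sketch of that loop with a toy table (this helper is
# invented for illustration and is not part of mercurial):
def _demoexpand(pat, expander):
    newname = []
    i = 0
    while i < len(pat):
        c = pat[i]
        if c == '%':
            i += 1
            c = expander[pat[i]]()
        newname.append(c)
        i += 1
    return ''.join(newname)

# _demoexpand('export/%b-%n.patch',
#             {'%': lambda: '%', 'b': lambda: 'myrepo',
#              'n': lambda: '07'})  # -> 'export/myrepo-07.patch'
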
class _unclosablefile(object):
    def __init__(self, fp):
        self._fp = fp

    def close(self):
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        return getattr(self._fp, attr)

def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):

    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        return _unclosablefile(fp)
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)

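# Note the modemap dance above: the first write to a given expanded
# filename opens it 'wb' and records 'ab' in the map, so later calls
# for the same name append instead of truncating -- e.g. exporting
# several revisions with a pattern like '%b.patch' accumulates all of
# them in one file rather than keeping only the last.
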
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.dirlog(file_)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r

def copy(ui, repo, pats, opts, rename=False):
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case-only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0

798 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
798 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
799 runargs=None, appendpid=False):
            runargs=None, appendpid=False):
    '''Run a command as a service.'''

    def writepid(pid):
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_postexec']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise error.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(util.getpid())

    if opts['daemon_postexec']:
        try:
            os.setsid()
        except AttributeError:
            pass
        for inst in opts['daemon_postexec']:
            if inst.startswith('unlink:'):
                lockpath = inst[7:]
                os.unlink(lockpath)
            elif inst.startswith('chdir:'):
                os.chdir(inst[6:])
            elif inst != 'none':
                raise error.Abort(_('invalid value for --daemon-postexec: %s')
                                  % inst)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
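
# Editorial note (not part of the original source): with the code above, the
# detached child ends up being re-invoked with a command line roughly like
#
#     hg serve --daemon-postexec=unlink:/tmp/hg-service-XXXXXX
#
# where the temporary path comes from tempfile.mkstemp(prefix='hg-service-')
# and the parent waits, via condfn, until the child unlinks that file to
# signal successful startup.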

## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' functions are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
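
# A minimal sketch (an assumption, not from the original source) of how an
# extension could hook into this facility; the identifier 'myext' and the
# helper name are hypothetical:
#
#     def _recordsource(repo, patchdata, extra, opts):
#         # preserve the original node id of the imported patch, if any
#         if patchdata.get('nodeid'):
#             extra['myext_source'] = patchdata['nodeid']
#
#     extrapreimportmap['myext'] = _recordsource
#     extrapreimport.append('myext')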

def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parents of the created commit
    :opts: the full dict of options passed to the import command
    :msgs: list to save the commit message to
        (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
        updatefunc(<repo>, <node>)
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
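
# Illustrative usage sketch (an assumption, not the actual call site in
# commands.import): callers collect messages and supply an update callback,
# roughly like
#
#     msgs = []
#     def updatefunc(repo, node):
#         hg.update(repo, node)
#     msg, node, rejects = tryimportone(ui, repo, patchdata, parents,
#                                       opts, msgs, updatefunc)
#
# where 'patchdata' is the binary patch text and 'parents' holds the current
# working-directory parents.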

# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
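
# A minimal sketch (an assumption; the identifier 'myext' is hypothetical) of
# an export hook emitting one extra header line per exported changeset:
#
#     def _exportheader(seqno, ctx):
#         return 'Exported-Seqno: %d' % seqno
#
#     extraexportmap['myext'] = _exportheader
#     extraexport.append('myext')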

def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.'''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}

    def single(rev, seqno, fp):
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0]  # Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
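
# For reference, the header written by single() above looks like this
# (values illustrative, not actual program output):
#
#   # HG changeset patch
#   # User Alice <alice@example.com>
#   # Date 1458000000 0
#   # Tue Mar 15 00:00:00 2016 +0000
#   # Node ID <40-hex-digit node id>
#   # Parent <40-hex-digit parent id>
#
# followed by any extraexport headers, the full description, and the diff.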

def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.'''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)

class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown before any other names for backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # the 'branches' namespace has special logic already handled
            # above, so we just skip it here
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        self._first = True

    def close(self):
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n "rev": %s' % jrev)
            self.ui.write(',\n "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n "rev": %s' % jrev)
        self.ui.write(',\n "node": %s' % jnode)
        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n "manifest": %s' % jmanifestnode)

            self.ui.write(',\n "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write(',\n "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")

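# For reference (illustrative values, not actual program output), quiet-mode
# output from jsonchangeset for a single revision looks like:
#
#   [
#    {
#    "rev": 0,
#    "node": "<40-hex-digit node id>"
#    }
#   ]
#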
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        filters = {'formatnode': formatnode}
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
        }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        assert not (tmpl and mapfile)
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile, filters=filters,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             filters=filters,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))

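# Illustrative map-file snippet (a hypothetical style file) showing why the
# tmplmodes scan above matters: with --verbose, the 'changeset_verbose' key
# overrides the plain 'changeset' entry because it is checked later.
#
#     changeset = '{rev}:{node|short} {desc|firstline}\n'
#     changeset_verbose = '{rev}:{node|short} {desc}\n'
#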
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.
    """

    # ui settings
    if not tmpl and not style: # templates are stronger than styles
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            return templater.unquotestring(tmpl), None
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname
        return None, mapfile

    if not tmpl:
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)

def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    matchfn = None
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile, buffered)
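
# Illustrative configuration (hypothetical hgrc contents): with both settings
# present, the precedence list above means ui.logtemplate wins and ui.style
# is ignored unless the logtemplate is cleared:
#
#     [ui]
#     logtemplate = {rev}:{node|short} {desc|firstline}\n
#     style = compact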

def showmarker(ui, marker, index=None):
    """utility function to display an obsolescence marker in a readable way

    To be used by debug functions."""
    if index is not None:
        ui.write("%i " % index)
    ui.write(hex(marker.precnode()))
    for repl in marker.succnodes():
        ui.write(' ')
        ui.write(hex(repl))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    ui.write('{%s}' % (', '.join('%r: %r' % t for t in
                                 sorted(marker.metadata().items())
                                 if t[0] != 'date')))
    ui.write('\n')

def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    df = util.matchdate(date)
    m = scmutil.matchall(repo)
    results = {}

    def prep(ctx, fns):
        d = ctx.date()
        if df(d[0]):
            results[ctx.rev()] = d

    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in results:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(results[rev])))
            return str(rev)

    raise error.Abort(_("revision matching date not found"))

def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2
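
# Usage sketch (illustrative, not from the original source): the generator
# yields 8, 16, 32, ... and then repeats 512 once the cap is reached; it is
# typically consumed one window at a time while paging through history:
#
#     import itertools
#     list(itertools.islice(increasingwindows(), 8))
#     # -> [8, 16, 32, 64, 128, 256, 512, 512]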
1659
1659
1660 class FileWalkError(Exception):
1660 class FileWalkError(Exception):
1661 pass
1661 pass
1662
1662
1663 def walkfilerevs(repo, match, follow, revs, fncache):
1663 def walkfilerevs(repo, match, follow, revs, fncache):
1664 '''Walks the file history for the matched files.
1664 '''Walks the file history for the matched files.
1665
1665
1666 Returns the changeset revs that are involved in the file history.
1666 Returns the changeset revs that are involved in the file history.
1667
1667
1668 Throws FileWalkError if the file history can't be walked using
1668 Throws FileWalkError if the file history can't be walked using
1669 filelogs alone.
1669 filelogs alone.
1670 '''
1670 '''
1671 wanted = set()
1671 wanted = set()
1672 copies = []
1672 copies = []
1673 minrev, maxrev = min(revs), max(revs)
1673 minrev, maxrev = min(revs), max(revs)
1674 def filerevgen(filelog, last):
1674 def filerevgen(filelog, last):
1675 """
1675 """
1676 Only files, no patterns. Check the history of each file.
1676 Only files, no patterns. Check the history of each file.
1677
1677
1678 Examines filelog entries within minrev, maxrev linkrev range
1678 Examines filelog entries within minrev, maxrev linkrev range
1679 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1679 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1680 tuples in backwards order
1680 tuples in backwards order
1681 """
1681 """
1682 cl_count = len(repo)
1682 cl_count = len(repo)
1683 revs = []
1683 revs = []
1684 for j in xrange(0, last + 1):
1684 for j in xrange(0, last + 1):
1685 linkrev = filelog.linkrev(j)
1685 linkrev = filelog.linkrev(j)
1686 if linkrev < minrev:
1686 if linkrev < minrev:
1687 continue
1687 continue
1688 # only yield rev for which we have the changelog, it can
1688 # only yield rev for which we have the changelog, it can
1689 # happen while doing "hg log" during a pull or commit
1689 # happen while doing "hg log" during a pull or commit
1690 if linkrev >= cl_count:
1690 if linkrev >= cl_count:
1691 break
1691 break
1692
1692
1693 parentlinkrevs = []
1693 parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)

    def iterfiles():
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
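    # Illustrative result (hypothetical history): for a file touched only
    # in changelog revisions 2, 5 and 8, this fast path returns
    # wanted == set([2, 5, 8]) and fills fncache accordingly, consulting
    # just the filelog linkrevs and never reading a changeset.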

class _followfilter(object):
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
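    # Usage sketch (illustrative): revisions must be fed in one direction
    # relative to the first match() call, e.g.
    #
    #   ff = _followfilter(repo)
    #   kept = [r for r in xrange(start, len(repo)) if ff.match(r)]
    #
    # the first rev seen seeds startrev, and later revs are kept only when
    # realparents() connects them to an already-kept rev in self.roots.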

def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''
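    # Caller sketch (illustrative): log-style commands typically drive this
    # as
    #
    #   def prepare(ctx, fns):
    #       ...  # gather per-changeset data for the files in fns
    #   for ctx in walkchangerevs(repo, match, opts, prepare):
    #       displayer.show(ctx)
    #
    # prepare() runs over each window in forward order before the same
    # contexts are yielded in the requested (usually reverse) order.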

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif value not in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()
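        # Illustrative behavior: membership tests drive the work here, so
        # the first "rev in wanted" reads that changeset once, caches the
        # verdict, and fills fncache[rev] as a side effect; repeated tests
        # are plain set lookups.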

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
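    # Windowing sketch (illustrative): increasingwindows() yields growing
    # window sizes (small at first, then doubling up to a cap), so a piped
    # "hg log | head" only pays for the first few revisions while long
    # walks still amortize the forward data-gathering pass.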

def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        for fn in files:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
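    # Illustrative cache shape (hypothetical revisions): following "b",
    # renamed from "a" at some point, might populate
    #
    #   fcache == {1: set(['a']), 3: set(['b']), 6: set(['b'])}
    #
    # so filematcher(3) restricts the displayed diff to "b" alone.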

def _makenofollowlogfilematcher(repo, pats, opts):
    '''hook for extensions to override the filematcher for non-follow cases'''
    return None

def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match object filtering
    the files to be detailed when displaying the revision.
    """
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
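        # Illustrative expansion (hypothetical invocation): "hg log -I
        # 'glob:*.py' sub/" makes the final revset contain a single call
        # like
        #
        #   _matchfiles('r:','d:relpath','p:sub/','i:glob:*.py')
        #
        # every pattern is smuggled through as one prefixed string argument.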
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher

def _logrevs(repo, opts):
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    elif follow and repo.dirstate.p1() == nullid:
        revs = revset.baseset()
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        revs = revset.spanset(repo)
        revs.reverse()
    return revs
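    # Illustrative defaults: plain "hg log" walks the whole repo
    # newest-first (reversed spanset), "hg log -f" walks reverse(:.) -- the
    # working parent and its ancestors -- and -f with a null working parent
    # (e.g. an empty repo) yields an empty baseset.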

def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B.
        # Sort again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B.
        # Sort again to fix that.
        fixopts = ['branch', 'only_branch', 'keyword', 'user']
        oldrevs = revs
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
        elif len(pats) > 1 or any(len(opts.get(op, [])) > 1 for op in fixopts):
            # XXX "A or B" is known to change the order; fix it by filtering
            # matched set again (issue5100)
            revs = oldrevs & revs
    if limit is not None:
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

def _graphnodeformatter(ui, displayer):
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode  # fast path for "{graphnode}"

    templ = formatter.gettemplater(ui, 'graphnode', spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache  # reuse cache of slow templates
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        return templater.stringify(templ('graphnode', **props))
    return formatnode
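    # Illustrative configuration (hypothetical template): with
    #
    #   [ui]
    #   graphnodetemplate = {if(bookmarks, "*", graphnode)}
    #
    # bookmarked changesets are drawn with "*" in the graph while all
    # others keep the default {graphnode} character.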

def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
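    # Illustrative hgrc for the knobs read above (values are examples only):
    #
    #   [experimental]
    #   graphstyle.parent = |
    #   graphstyle.grandparent = :
    #   graphstyle.missing =
    #   graphshorten = true
    #
    # an empty style string is normalized to None above, leaving that edge
    # type unstyled.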

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()

def graphlog(ui, repo, *pats, **opts):
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)

def checkunsupportedgraphflags(pats, opts):
    for op in ["newest_first"]:
        if op in opts and opts[op]:
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % op.replace("_", "-"))

def graphrevs(repo, nodes, opts):
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)

def add(ui, repo, match, prefix, explicitonly, **opts):
    join = lambda f: os.path.join(prefix, f)
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

def forget(ui, repo, match, prefix, explicitonly):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

def files(ui, ctx, m, fm, fmt, subrepos):
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        def matchessubrepo(subpath):
            return (m.exact(subpath)
                    or any(f.startswith(subpath + '/') for f in m.files()))

        if subrepos or matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret

def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        def matchessubrepo(matcher, subpath):
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        count += 1
        if subrepos or matchessubrepo(m, subpath):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))

            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file has been marked for add'
                              ' (use forget to undo)\n') % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue  # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret

def cat(ui, repo, ctx, matcher, prefix, **opts):
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        matcher.bad(path, msg)

    for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
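
# A sketch of how a caller might drive cat() (illustrative only; the real
# caller lives in commands.py, not in this file):
#
#   ctx = scmutil.revsingle(repo, opts.get('rev'))
#   m = scmutil.match(ctx, (file1,) + pats, opts)
#   return cmdutil.cat(ui, repo, ctx, m, '', **opts)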

def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)

def amend(ui, repo, commitfunc, old, extra, pats, opts):
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                if old.p2():
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            # commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid

def commiteditor(repo, ctx, subs, editform=''):
    if ctx.description():
        return ctx.description()
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)

def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending)
    text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text

def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is a string

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()

def hgprefix(msg):
    return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
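
# For example, hgprefix("user: alice\nbranch 'default'") returns
# "HG: user: alice\nHG: branch 'default'"; empty lines are dropped.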

def buildcommittext(repo, ctx, subs, extramsg):
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(hgprefix(_("Enter commit message."
                               " Lines beginning with 'HG:' are removed.")))
    edittext.append(hgprefix(extramsg))
    edittext.append("HG: --")
    edittext.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_("branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
    edittext.extend([hgprefix(_("added %s") % f) for f in added])
    edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
    edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
    if not added and not modified and not removed:
        edittext.append(hgprefix(_("no files changed")))
    edittext.append("")

    return "\n".join(edittext)
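
# The resulting editor text looks roughly like this (an illustrative sample;
# the actual lines depend on the context and on extramsg):
#
#   <existing description, if any>
#
#
#   HG: Enter commit message. Lines beginning with 'HG:' are removed.
#   HG: Leave message empty to abort commit.
#   HG: --
#   HG: user: alice
#   HG: branch 'default'
#   HG: changed foo.py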

def commitstatus(repo, node, branch, bheads=None, opts=None):
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))

def postcommitstatus(repo, pats, opts):
    return repo.status(match=scmutil.match(repo[None], pats, opts))

def revert(ui, repo, ctx, parents, *pats, **opts):
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all files in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted files
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something other than the parent. This
        # will slightly alter the behavior of revert (doing a backup or not,
        # deleting or just forgetting, etc.).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinguish between dirstate removes and the others
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = dsmodified - smf
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between files to forget and the others
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists, it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of files>, <message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

3140 # "constant" that convey the backup strategy.
3140 # "constant" that convey the backup strategy.
3141 # All set to `discard` if `no-backup` is set do avoid checking
3141 # All set to `discard` if `no-backup` is set do avoid checking
3142 # no_backup lower in the code.
3142 # no_backup lower in the code.
3143 # These values are ordered for comparison purposes
3143 # These values are ordered for comparison purposes
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backup = check = discard

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that result in files being changed on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], backup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets do not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touched on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup and (backup <= dobackup
                                     or wctx[abs].cmp(ctx[abs])):
                        bakname = scmutil.origpath(ui, repo, rel)
                        ui.note(_('saving current version of %s as %s\n') %
                                (rel, bakname))
                        if not opts.get('dry_run'):
                            if interactive:
                                util.copyfile(target, bakname)
                            else:
                                util.rename(target, bakname)
                if ui.verbose or not exact:
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))

def _revertprefetch(repo, ctx, *files):
    """Let extensions that change the storage layer prefetch content"""
    pass
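
# Extensions that change the storage layer would typically wrap this no-op
# via extensions.wrapfunction (a hypothetical sketch; fetchdata is an
# illustrative helper, not a real API):
#
#   def prefetch(orig, repo, ctx, *files):
#       fetchdata(repo, ctx, files)
#       return orig(repo, ctx, *files)
#   extensions.wrapfunction(cmdutil, '_revertprefetch', prefetch)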

def _performrevert(repo, parents, ctx, actions, interactive=False):
    """function that actually performs all the actions computed for revert

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            choice = \
                repo.ui.promptchoice(
                    _("forget added file %s (yn)?$$ &Yes $$ &No")
                    % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        # Apply changes
        fp = stringio()
        for c in chunks:
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)

def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            if synopsis:
                table[name] = func, list(options), synopsis
            else:
                table[name] = func, list(options)
            return func
        return decorator

    return cmd
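
# Typical usage by an extension (a minimal sketch; the command name and body
# are illustrative, not part of this module):
#
#   cmdtable = {}
#   command = cmdutil.command(cmdtable)
#
#   @command('hello', [], _('hg hello'), norepo=True)
#   def hello(ui, **opts):
#       ui.write('hello\n')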

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
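
# Callbacks are registered on these via util.hooks.add(source, hook), e.g.
# (a hypothetical extension; names are illustrative):
#
#   def mysummary(ui, repo):
#       ui.note('myext: nothing to report\n')
#   cmdutil.summaryhooks.add('myext', mysummary)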

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3425
3425
3426 def checkunfinished(repo, commit=False):
3426 def checkunfinished(repo, commit=False):
3427 '''Look for an unfinished multistep operation, like graft, and abort
3427 '''Look for an unfinished multistep operation, like graft, and abort
3428 if found. It's probably good to check this right before
3428 if found. It's probably good to check this right before
3429 bailifchanged().
3429 bailifchanged().
3430 '''
3430 '''
3431 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3431 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3432 if commit and allowcommit:
3432 if commit and allowcommit:
3433 continue
3433 continue
3434 if repo.vfs.exists(f):
3434 if repo.vfs.exists(f):
3435 raise error.Abort(msg, hint=hint)
3435 raise error.Abort(msg, hint=hint)
3436
3436
3437 def clearunfinished(repo):
3437 def clearunfinished(repo):
3438 '''Check for unfinished operations (as above), and clear the ones
3438 '''Check for unfinished operations (as above), and clear the ones
3439 that are clearable.
3439 that are clearable.
3440 '''
3440 '''
3441 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3441 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3442 if not clearable and repo.vfs.exists(f):
3442 if not clearable and repo.vfs.exists(f):
3443 raise error.Abort(msg, hint=hint)
3443 raise error.Abort(msg, hint=hint)
3444 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3444 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3445 if clearable and repo.vfs.exists(f):
3445 if clearable and repo.vfs.exists(f):
3446 util.unlink(repo.join(f))
3446 util.unlink(repo.join(f))
3447
3447
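As a sketch of how this table is meant to be extended, an extension that
defines its own multistep operation could append an entry using the same
(state file, clearable, allowcommit, error, hint) layout documented above
(the 'mystate' entry is hypothetical):

    unfinishedstates.append(
        ('mystate', True, False, _('myop in progress'),
         _("use 'hg myop --continue' or 'hg update' to abort")))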
3448 afterresolvedstates = [
3448 afterresolvedstates = [
3449 ('graftstate',
3449 ('graftstate',
3450 _('hg graft --continue')),
3450 _('hg graft --continue')),
3451 ]
3451 ]
3452
3452
3453 def howtocontinue(repo):
3453 def howtocontinue(repo):
3454 '''Check for an unfinished operation and return the command to finish
3454 '''Check for an unfinished operation and return the command to finish
3455 it.
3455 it.
3456
3456
3457 afterresolvedstates tuples define a .hg/{file} and the corresponding
3457 afterresolvedstates tuples define a .hg/{file} and the corresponding
3458 command needed to finish it.
3458 command needed to finish it.
3459
3459
3460 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3460 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3461 a boolean.
3461 a boolean.
3462 '''
3462 '''
3463 contmsg = _("continue: %s")
3463 contmsg = _("continue: %s")
3464 for f, msg in afterresolvedstates:
3464 for f, msg in afterresolvedstates:
3465 if repo.vfs.exists(f):
3465 if repo.vfs.exists(f):
3466 return contmsg % msg, True
3466 return contmsg % msg, True
3467 workingctx = repo[None]
3467 workingctx = repo[None]
3468 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3468 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3469 for s in workingctx.substate)
3469 for s in workingctx.substate)
3470 if dirty:
3470 if dirty:
3471 return contmsg % _("hg commit"), False
3471 return contmsg % _("hg commit"), False
3472 return None, None
3472 return None, None
3473
3473
3474 def checkafterresolved(repo):
3474 def checkafterresolved(repo):
3475 '''Inform the user about the next action after completing hg resolve
3475 '''Inform the user about the next action after completing hg resolve
3476
3476
3477 If there's a matching afterresolvedstates entry, the message is
3477 If there's a matching afterresolvedstates entry, the message is
3478 reported via repo.ui.warn.
3478 reported via repo.ui.warn.
3479
3479
3480 Otherwise, it is reported via repo.ui.note.
3480 Otherwise, it is reported via repo.ui.note.
3481 '''
3481 '''
3482 msg, warning = howtocontinue(repo)
3482 msg, warning = howtocontinue(repo)
3483 if msg is not None:
3483 if msg is not None:
3484 if warning:
3484 if warning:
3485 repo.ui.warn("%s\n" % msg)
3485 repo.ui.warn("%s\n" % msg)
3486 else:
3486 else:
3487 repo.ui.note("%s\n" % msg)
3487 repo.ui.note("%s\n" % msg)
3488
3488
3489 def wrongtooltocontinue(repo, task):
3489 def wrongtooltocontinue(repo, task):
3490 '''Raise an abort suggesting how to properly continue if there is an
3490 '''Raise an abort suggesting how to properly continue if there is an
3491 active task.
3491 active task.
3492
3492
3493 Uses howtocontinue() to find the active task.
3493 Uses howtocontinue() to find the active task.
3494
3494
3495 If there's no task, or the only suggestion is 'hg commit' (reported
3495 If there's no task, or the only suggestion is 'hg commit' (reported
3496 via repo.ui.note rather than as a warning), it does not offer a hint.
3496 via repo.ui.note rather than as a warning), it does not offer a hint.
3497 '''
3497 '''
3498 after = howtocontinue(repo)
3498 after = howtocontinue(repo)
3499 hint = None
3499 hint = None
3500 if after[1]:
3500 if after[1]:
3501 hint = after[0]
3501 hint = after[0]
3502 raise error.Abort(_('no %s in progress') % task, hint=hint)
3502 raise error.Abort(_('no %s in progress') % task, hint=hint)
3503
3503
3504 class dirstateguard(object):
3504 class dirstateguard(object):
3505 '''Restore dirstate at unexpected failure.
3505 '''Restore dirstate at unexpected failure.
3506
3506
3507 At construction time, this class:
3507 At construction time, this class:
3508
3508
3509 - writes the current ``repo.dirstate`` out, and
3509 - writes the current ``repo.dirstate`` out, and
3510 - saves ``.hg/dirstate`` into a backup file
3510 - saves ``.hg/dirstate`` into a backup file
3511
3511
3512 ``.hg/dirstate`` is restored from the backup file if ``release()``
3512 ``.hg/dirstate`` is restored from the backup file if ``release()``
3513 is invoked before ``close()``.
3513 is invoked before ``close()``.
3514
3514
3515 If ``close()`` is invoked first, the backup file is simply removed.
3515 If ``close()`` is invoked first, the backup file is simply removed.
3516 '''
3516 '''
3517
3517
3518 def __init__(self, repo, name):
3518 def __init__(self, repo, name):
3519 self._repo = repo
3519 self._repo = repo
3520 self._suffix = '.backup.%s.%d' % (name, id(self))
3520 self._suffix = '.backup.%s.%d' % (name, id(self))
3521 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3521 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3522 self._active = True
3522 self._active = True
3523 self._closed = False
3523 self._closed = False
3524
3524
3525 def __del__(self):
3525 def __del__(self):
3526 if self._active: # still active
3526 if self._active: # still active
3527 # this may occur even if this class is used correctly:
3527 # this may occur even if this class is used correctly:
3528 # for example, releasing other resources like a transaction
3528 # for example, releasing other resources like a transaction
3529 # may raise an exception before ``dirstateguard.release`` in
3529 # may raise an exception before ``dirstateguard.release`` in
3530 # ``release(tr, ....)``.
3530 # ``release(tr, ....)``.
3531 self._abort()
3531 self._abort()
3532
3532
3533 def close(self):
3533 def close(self):
3534 if not self._active: # already inactivated
3534 if not self._active: # already inactivated
3535 msg = (_("can't close already inactivated backup: dirstate%s")
3535 msg = (_("can't close already inactivated backup: dirstate%s")
3536 % self._suffix)
3536 % self._suffix)
3537 raise error.Abort(msg)
3537 raise error.Abort(msg)
3538
3538
3539 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3539 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3540 self._suffix)
3540 self._suffix)
3541 self._active = False
3541 self._active = False
3542 self._closed = True
3542 self._closed = True
3543
3543
3544 def _abort(self):
3544 def _abort(self):
3545 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3545 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3546 self._suffix)
3546 self._suffix)
3547 self._active = False
3547 self._active = False
3548
3548
3549 def release(self):
3549 def release(self):
3550 if not self._closed:
3550 if not self._closed:
3551 if not self._active: # already inactivated
3551 if not self._active: # already inactivated
3552 msg = (_("can't release already inactivated backup:"
3552 msg = (_("can't release already inactivated backup:"
3553 " dirstate%s")
3553 " dirstate%s")
3554 % self._suffix)
3554 % self._suffix)
3555 raise error.Abort(msg)
3555 raise error.Abort(msg)
3556 self._abort()
3556 self._abort()
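A usage sketch for the guard above, assuming the usual try/finally shape
(the 'operation' callable is hypothetical):

    def runwithguard(repo, operation):
        guard = dirstateguard(repo, 'example')
        try:
            operation(repo)    # mutate the dirstate here
            guard.close()      # success: discard the backup
        finally:
            guard.release()    # if close() never ran, restore the backup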
@@ -1,553 +1,553 @@
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 node,
13 node,
14 pathutil,
14 pathutil,
15 scmutil,
15 scmutil,
16 util,
16 util,
17 )
17 )
18
18
19 def _findlimit(repo, a, b):
19 def _findlimit(repo, a, b):
20 """
20 """
21 Find the last revision that needs to be checked to ensure that a full
21 Find the last revision that needs to be checked to ensure that a full
22 transitive closure for file copies can be properly calculated.
22 transitive closure for file copies can be properly calculated.
23 Generally, this means finding the earliest revision number that's an
23 Generally, this means finding the earliest revision number that's an
24 ancestor of a or b but not both, except when a or b is a direct descendent
24 ancestor of a or b but not both, except when a or b is a direct descendant
24 ancestor of a or b but not both, except when a or b is a direct descendant
25 of the other, in which case we can return the minimum revnum of a and b.
25 of the other, in which case we can return the minimum revnum of a and b.
26 Returns None if no such revision exists.
26 Returns None if no such revision exists.
27 """
28
28
29 # basic idea:
29 # basic idea:
30 # - mark a and b with different sides
30 # - mark a and b with different sides
31 # - if a parent's children are all on the same side, the parent is
31 # - if a parent's children are all on the same side, the parent is
32 # on that side, otherwise it is on no side
32 # on that side, otherwise it is on no side
33 # - walk the graph in topological order with the help of a heap;
33 # - walk the graph in topological order with the help of a heap;
34 # - add unseen parents to side map
34 # - add unseen parents to side map
35 # - clear side of any parent that has children on different sides
35 # - clear side of any parent that has children on different sides
36 # - track number of interesting revs that might still be on a side
36 # - track number of interesting revs that might still be on a side
37 # - track the lowest interesting rev seen
37 # - track the lowest interesting rev seen
38 # - quit when interesting revs is zero
38 # - quit when interesting revs is zero
39
39
40 cl = repo.changelog
40 cl = repo.changelog
41 working = len(cl) # pseudo rev for the working directory
41 working = len(cl) # pseudo rev for the working directory
42 if a is None:
42 if a is None:
43 a = working
43 a = working
44 if b is None:
44 if b is None:
45 b = working
45 b = working
46
46
47 side = {a: -1, b: 1}
47 side = {a: -1, b: 1}
48 visit = [-a, -b]
48 visit = [-a, -b]
49 heapq.heapify(visit)
49 heapq.heapify(visit)
50 interesting = len(visit)
50 interesting = len(visit)
51 hascommonancestor = False
51 hascommonancestor = False
52 limit = working
52 limit = working
53
53
54 while interesting:
54 while interesting:
55 r = -heapq.heappop(visit)
55 r = -heapq.heappop(visit)
56 if r == working:
56 if r == working:
57 parents = [cl.rev(p) for p in repo.dirstate.parents()]
57 parents = [cl.rev(p) for p in repo.dirstate.parents()]
58 else:
58 else:
59 parents = cl.parentrevs(r)
59 parents = cl.parentrevs(r)
60 for p in parents:
60 for p in parents:
61 if p < 0:
61 if p < 0:
62 continue
62 continue
63 if p not in side:
63 if p not in side:
64 # first time we see p; add it to visit
64 # first time we see p; add it to visit
65 side[p] = side[r]
65 side[p] = side[r]
66 if side[p]:
66 if side[p]:
67 interesting += 1
67 interesting += 1
68 heapq.heappush(visit, -p)
68 heapq.heappush(visit, -p)
69 elif side[p] and side[p] != side[r]:
69 elif side[p] and side[p] != side[r]:
70 # p was interesting but now we know better
70 # p was interesting but now we know better
71 side[p] = 0
71 side[p] = 0
72 interesting -= 1
72 interesting -= 1
73 hascommonancestor = True
73 hascommonancestor = True
74 if side[r]:
74 if side[r]:
75 limit = r # lowest rev visited
75 limit = r # lowest rev visited
76 interesting -= 1
76 interesting -= 1
77
77
78 if not hascommonancestor:
78 if not hascommonancestor:
79 return None
79 return None
80
80
81 # Consider the following flow (see test-commit-amend.t under issue4405):
81 # Consider the following flow (see test-commit-amend.t under issue4405):
82 # 1/ File 'a0' committed
82 # 1/ File 'a0' committed
83 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
83 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
84 # 3/ Move back to first commit
84 # 3/ Move back to first commit
85 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
85 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
86 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
86 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
87 #
87 #
88 # During the amend in step five, we will be in this state:
88 # During the amend in step five, we will be in this state:
89 #
89 #
90 # @ 3 temporary amend commit for a1-amend
90 # @ 3 temporary amend commit for a1-amend
91 # |
91 # |
92 # o 2 a1-amend
92 # o 2 a1-amend
93 # |
93 # |
94 # | o 1 a1
94 # | o 1 a1
95 # |/
95 # |/
96 # o 0 a0
96 # o 0 a0
97 #
97 #
98 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
98 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
99 # yet the filelog has the copy information in rev 1 and we will not look
99 # yet the filelog has the copy information in rev 1 and we will not look
100 # back far enough unless we also look at a and b as candidates.
100 # back far enough unless we also look at a and b as candidates.
101 # This only occurs when a is a descendant of b or vice-versa.
101 # This only occurs when a is a descendant of b or vice-versa.
102 return min(limit, a, b)
102 return min(limit, a, b)
103
103
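A toy re-implementation of the walk above over a plain {rev: parents} map
(not Mercurial's changelog; the revision numbers are hypothetical). It shows
why the final min() matters for the amend scenario in the comment: the walk
alone stops with limit == 2, and min() pulls the answer down to 0.

    import heapq

    def findlimit_toy(parents, a, b):
        side = {a: -1, b: 1}
        visit = [-a, -b]
        heapq.heapify(visit)
        interesting = len(visit)
        hascommonancestor = False
        limit = max(parents) + 1
        while interesting:
            r = -heapq.heappop(visit)
            for p in parents.get(r, ()):
                if p not in side:
                    side[p] = side[r]
                    if side[p]:
                        interesting += 1
                    heapq.heappush(visit, -p)
                elif side[p] and side[p] != side[r]:
                    side[p] = 0
                    interesting -= 1
                    hascommonancestor = True
            if side[r]:
                limit = r
                interesting -= 1
        if not hascommonancestor:
            return None
        return min(limit, a, b)

    # the graph from the comment: 3 -> 2 -> 0 and 1 -> 0
    assert findlimit_toy({0: [], 1: [0], 2: [0], 3: [2]}, 3, 0) == 0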
104 def _chain(src, dst, a, b):
104 def _chain(src, dst, a, b):
105 '''chain two sets of copies a->b'''
105 '''chain two sets of copies a->b'''
106 t = a.copy()
106 t = a.copy()
107 for k, v in b.iteritems():
107 for k, v in b.iteritems():
108 if v in t:
108 if v in t:
109 # found a chain
109 # found a chain
110 if t[v] != k:
110 if t[v] != k:
111 # file wasn't renamed back to itself
111 # file wasn't renamed back to itself
112 t[k] = t[v]
112 t[k] = t[v]
113 if v not in dst:
113 if v not in dst:
114 # chain was a rename, not a copy
114 # chain was a rename, not a copy
115 del t[v]
115 del t[v]
116 if v in src:
116 if v in src:
117 # file is a copy of an existing file
117 # file is a copy of an existing file
118 t[k] = v
118 t[k] = v
119
119
120 # remove criss-crossed copies
120 # remove criss-crossed copies
121 for k, v in t.items():
121 for k, v in t.items():
122 if k in src and v in dst:
122 if k in src and v in dst:
123 del t[k]
123 del t[k]
124
124
125 return t
125 return t
126
126
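A worked example for _chain, using sets as stand-ins for the src/dst
contexts (membership checks are all the function needs here): a rename
a->b followed by b->c chains to c->a, and the stale intermediate entry
for 'b' is dropped because 'b' no longer exists at the destination.

    src = {'a'}             # files present in the starting context
    dst = {'c'}             # files present in the final context
    first = {'b': 'a'}      # copies from the start to the midpoint
    second = {'c': 'b'}     # copies from the midpoint to the end
    assert _chain(src, dst, first, second) == {'c': 'a'}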
127 def _tracefile(fctx, am, limit=-1):
127 def _tracefile(fctx, am, limit=-1):
128 '''return the file context that is the ancestor of fctx present in the
128 '''return the file context that is the ancestor of fctx present in the
129 ancestor manifest am, stopping after the first ancestor lower than limit'''
129 ancestor manifest am, stopping after the first ancestor lower than limit'''
130
130
131 for f in fctx.ancestors():
131 for f in fctx.ancestors():
132 if am.get(f.path(), None) == f.filenode():
132 if am.get(f.path(), None) == f.filenode():
133 return f
133 return f
134 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
134 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
135 return None
135 return None
136
136
137 def _dirstatecopies(d):
137 def _dirstatecopies(d):
138 ds = d._repo.dirstate
138 ds = d._repo.dirstate
139 c = ds.copies().copy()
139 c = ds.copies().copy()
140 for k in c.keys():
140 for k in c.keys():
141 if ds[k] not in 'anm':
141 if ds[k] not in 'anm':
142 del c[k]
142 del c[k]
143 return c
143 return c
144
144
145 def _computeforwardmissing(a, b, match=None):
145 def _computeforwardmissing(a, b, match=None):
146 """Computes which files are in b but not a.
146 """Computes which files are in b but not a.
147 This is its own function so extensions can easily wrap this call to see what
147 This is its own function so extensions can easily wrap this call to see what
148 files _forwardcopies is about to process.
148 files _forwardcopies is about to process.
149 """
149 """
150 ma = a.manifest()
150 ma = a.manifest()
151 mb = b.manifest()
151 mb = b.manifest()
152 if match:
152 if match:
153 ma = ma.matches(match)
153 ma = ma.matches(match)
154 mb = mb.matches(match)
154 mb = mb.matches(match)
155 return mb.filesnotin(ma)
155 return mb.filesnotin(ma)
156
156
157 def _forwardcopies(a, b, match=None):
157 def _forwardcopies(a, b, match=None):
158 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
158 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
159
159
160 # check for working copy
160 # check for working copy
161 w = None
161 w = None
162 if b.rev() is None:
162 if b.rev() is None:
163 w = b
163 w = b
164 b = w.p1()
164 b = w.p1()
165 if a == b:
165 if a == b:
166 # short-circuit to avoid issues with merge states
166 # short-circuit to avoid issues with merge states
167 return _dirstatecopies(w)
167 return _dirstatecopies(w)
168
168
169 # files might have to be traced back to the fctx parent of the last
169 # files might have to be traced back to the fctx parent of the last
170 # one-side-only changeset, but not further back than that
170 # one-side-only changeset, but not further back than that
171 limit = _findlimit(a._repo, a.rev(), b.rev())
171 limit = _findlimit(a._repo, a.rev(), b.rev())
172 if limit is None:
172 if limit is None:
173 limit = -1
173 limit = -1
174 am = a.manifest()
174 am = a.manifest()
175
175
176 # find where new files came from
176 # find where new files came from
177 # we currently don't try to find where old files went, too expensive
177 # we currently don't try to find where old files went, too expensive
178 # this means we can miss a case like 'hg rm b; hg cp a b'
178 # this means we can miss a case like 'hg rm b; hg cp a b'
179 cm = {}
179 cm = {}
180
180
181 # Computing the forward missing is quite expensive on large manifests, since
181 # Computing the forward missing is quite expensive on large manifests, since
182 # it compares the entire manifests. We can optimize it in the common use
182 # it compares the entire manifests. We can optimize it in the common use
183 # case of computing what copies are in a commit versus its parent (like
183 # case of computing what copies are in a commit versus its parent (like
184 # during a rebase or histedit). Note, we exclude merge commits from this
184 # during a rebase or histedit). Note, we exclude merge commits from this
185 # optimization, since the ctx.files() for a merge commit is not correct for
185 # optimization, since the ctx.files() for a merge commit is not correct for
186 # this comparison.
186 # this comparison.
187 forwardmissingmatch = match
187 forwardmissingmatch = match
188 if not match and b.p1() == a and b.p2().node() == node.nullid:
188 if not match and b.p1() == a and b.p2().node() == node.nullid:
189 forwardmissingmatch = scmutil.matchfiles(a._repo, b.files())
189 forwardmissingmatch = scmutil.matchfiles(a._repo, b.files())
190 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
190 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
191
191
192 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
192 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
193 for f in missing:
193 for f in missing:
194 fctx = b[f]
194 fctx = b[f]
195 fctx._ancestrycontext = ancestrycontext
195 fctx._ancestrycontext = ancestrycontext
196 ofctx = _tracefile(fctx, am, limit)
196 ofctx = _tracefile(fctx, am, limit)
197 if ofctx:
197 if ofctx:
198 cm[f] = ofctx.path()
198 cm[f] = ofctx.path()
199
199
200 # combine copies from dirstate if necessary
200 # combine copies from dirstate if necessary
201 if w is not None:
201 if w is not None:
202 cm = _chain(a, w, cm, _dirstatecopies(w))
202 cm = _chain(a, w, cm, _dirstatecopies(w))
203
203
204 return cm
204 return cm
205
205
206 def _backwardrenames(a, b):
206 def _backwardrenames(a, b):
207 if a._repo.ui.configbool('experimental', 'disablecopytrace'):
207 if a._repo.ui.configbool('experimental', 'disablecopytrace'):
208 return {}
208 return {}
209
209
210 # Even though we're not taking copies into account, 1:n rename situations
210 # Even though we're not taking copies into account, 1:n rename situations
211 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
211 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
212 # arbitrarily pick one of the renames.
212 # arbitrarily pick one of the renames.
213 f = _forwardcopies(b, a)
213 f = _forwardcopies(b, a)
214 r = {}
214 r = {}
215 for k, v in sorted(f.iteritems()):
215 for k, v in sorted(f.iteritems()):
216 # remove copies
216 # remove copies
217 if v in a:
217 if v in a:
218 continue
218 continue
219 r[v] = k
219 r[v] = k
220 return r
220 return r
221
221
222 def pathcopies(x, y, match=None):
222 def pathcopies(x, y, match=None):
223 '''find {dst@y: src@x} copy mapping for directed compare'''
223 '''find {dst@y: src@x} copy mapping for directed compare'''
224 if x == y or not x or not y:
224 if x == y or not x or not y:
225 return {}
225 return {}
226 a = y.ancestor(x)
226 a = y.ancestor(x)
227 if a == x:
227 if a == x:
228 return _forwardcopies(x, y, match=match)
228 return _forwardcopies(x, y, match=match)
229 if a == y:
229 if a == y:
230 return _backwardrenames(x, y)
230 return _backwardrenames(x, y)
231 return _chain(x, y, _backwardrenames(x, a),
231 return _chain(x, y, _backwardrenames(x, a),
232 _forwardcopies(a, y, match=match))
232 _forwardcopies(a, y, match=match))
233
233
234 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2):
234 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2):
235 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
235 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
236 and c2. This is its own function so extensions can easily wrap this call
236 and c2. This is its own function so extensions can easily wrap this call
237 to see what files mergecopies is about to process.
237 to see what files mergecopies is about to process.
238
238
239 Even though c1 and c2 are not used in this function, they are passed
239 Even though c1 and c2 are not used in this function, they are passed
240 along so extensions wrapping this call can read the file nodes of the changed files.
240 along so extensions wrapping this call can read the file nodes of the changed files.
241 """
241 """
242 u1 = sorted(addedinm1 - addedinm2)
242 u1 = sorted(addedinm1 - addedinm2)
243 u2 = sorted(addedinm2 - addedinm1)
243 u2 = sorted(addedinm2 - addedinm1)
244
244
245 if u1:
245 if u1:
246 repo.ui.debug(" unmatched files in local:\n %s\n"
246 repo.ui.debug(" unmatched files in local:\n %s\n"
247 % "\n ".join(u1))
247 % "\n ".join(u1))
248 if u2:
248 if u2:
249 repo.ui.debug(" unmatched files in other:\n %s\n"
249 repo.ui.debug(" unmatched files in other:\n %s\n"
250 % "\n ".join(u2))
250 % "\n ".join(u2))
251 return u1, u2
251 return u1, u2
252
252
253 def _makegetfctx(ctx):
253 def _makegetfctx(ctx):
254 """return a 'getfctx' function suitable for checkcopies usage
254 """return a 'getfctx' function suitable for checkcopies usage
255
255
256 We have to re-setup the function building 'filectx' for each
256 We have to re-setup the function building 'filectx' for each
257 'checkcopies' to ensure the linkrev adjustment is properly setup for
257 'checkcopies' to ensure the linkrev adjustment is properly setup for
258 each. Linkrev adjustment is important to avoid bugs in rename
258 each. Linkrev adjustment is important to avoid bugs in rename
259 detection. Moreover, having a proper '_ancestrycontext' setup ensures
259 detection. Moreover, having a proper '_ancestrycontext' setup ensures
260 the performance impact of this adjustment is kept limited. Without it,
260 the performance impact of this adjustment is kept limited. Without it,
261 each file could do a full dag traversal making the time complexity of
261 each file could do a full dag traversal making the time complexity of
262 the operation explode (see issue4537).
262 the operation explode (see issue4537).
263
263
264 This function exists here mostly to limit the impact on stable. Feel
264 This function exists here mostly to limit the impact on stable. Feel
265 free to refactor on default.
265 free to refactor on default.
266 """
266 """
267 rev = ctx.rev()
267 rev = ctx.rev()
268 repo = ctx._repo
268 repo = ctx._repo
269 ac = getattr(ctx, '_ancestrycontext', None)
269 ac = getattr(ctx, '_ancestrycontext', None)
270 if ac is None:
270 if ac is None:
271 revs = [rev]
271 revs = [rev]
272 if rev is None:
272 if rev is None:
273 revs = [p.rev() for p in ctx.parents()]
273 revs = [p.rev() for p in ctx.parents()]
274 ac = repo.changelog.ancestors(revs, inclusive=True)
274 ac = repo.changelog.ancestors(revs, inclusive=True)
275 ctx._ancestrycontext = ac
275 ctx._ancestrycontext = ac
276 def makectx(f, n):
276 def makectx(f, n):
277 if len(n) != 20: # in a working context?
277 if len(n) != 20: # in a working context?
278 if ctx.rev() is None:
278 if ctx.rev() is None:
279 return ctx.filectx(f)
279 return ctx.filectx(f)
280 return repo[None][f]
280 return repo[None][f]
281 fctx = repo.filectx(f, fileid=n)
281 fctx = repo.filectx(f, fileid=n)
282 # setup only needed for filectx not created from a changectx
282 # setup only needed for filectx not created from a changectx
283 fctx._ancestrycontext = ac
283 fctx._ancestrycontext = ac
284 fctx._descendantrev = rev
284 fctx._descendantrev = rev
285 return fctx
285 return fctx
286 return util.lrucachefunc(makectx)
286 return util.lrucachefunc(makectx)
287
287
288 def mergecopies(repo, c1, c2, ca):
288 def mergecopies(repo, c1, c2, ca):
289 """
289 """
290 Find moves and copies between context c1 and c2 that are relevant
290 Find moves and copies between context c1 and c2 that are relevant
291 for merging.
291 for merging.
292
292
293 Returns four dicts: "copy", "movewithdir", "diverge", and
293 Returns four dicts: "copy", "movewithdir", "diverge", and
294 "renamedelete".
294 "renamedelete".
295
295
296 "copy" is a mapping from destination name -> source name,
296 "copy" is a mapping from destination name -> source name,
297 where source is in c1 and destination is in c2 or vice-versa.
297 where source is in c1 and destination is in c2 or vice-versa.
298
298
299 "movewithdir" is a mapping from source name -> destination name,
299 "movewithdir" is a mapping from source name -> destination name,
300 where the file at source present in one context but not the other
300 where the file at source, present in one context but not the other,
300 where the file at source, present in one context but not the other,
301 needs to be moved to destination by the merge process, because the
302 other context moved the directory it is in.
302 other context moved the directory it is in.
303
303
304 "diverge" is a mapping of source name -> list of destination names
304 "diverge" is a mapping of source name -> list of destination names
305 for divergent renames.
305 for divergent renames.
306
306
307 "renamedelete" is a mapping of source name -> list of destination
307 "renamedelete" is a mapping of source name -> list of destination
308 names for files deleted in c1 that were renamed in c2 or vice-versa.
308 names for files deleted in c1 that were renamed in c2 or vice-versa.
309 """
309 """
310 # avoid silly behavior for update from empty dir
310 # avoid silly behavior for update from empty dir
311 if not c1 or not c2 or c1 == c2:
311 if not c1 or not c2 or c1 == c2:
312 return {}, {}, {}, {}
312 return {}, {}, {}, {}
313
313
314 # avoid silly behavior for parent -> working dir
314 # avoid silly behavior for parent -> working dir
315 if c2.node() is None and c1.node() == repo.dirstate.p1():
315 if c2.node() is None and c1.node() == repo.dirstate.p1():
316 return repo.dirstate.copies(), {}, {}, {}
316 return repo.dirstate.copies(), {}, {}, {}
317
317
318 # Copy trace disabling is explicitly below the node == p1 logic above
318 # Copy trace disabling is explicitly below the node == p1 logic above
319 # because the logic above is required for a simple copy to be kept across a
319 # because the logic above is required for a simple copy to be kept across a
320 # rebase.
320 # rebase.
321 if repo.ui.configbool('experimental', 'disablecopytrace'):
321 if repo.ui.configbool('experimental', 'disablecopytrace'):
322 return {}, {}, {}, {}
322 return {}, {}, {}, {}
323
323
324 limit = _findlimit(repo, c1.rev(), c2.rev())
324 limit = _findlimit(repo, c1.rev(), c2.rev())
325 if limit is None:
325 if limit is None:
326 # no common ancestor, no copies
326 # no common ancestor, no copies
327 return {}, {}, {}, {}
327 return {}, {}, {}, {}
328 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
328 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
329
329
330 m1 = c1.manifest()
330 m1 = c1.manifest()
331 m2 = c2.manifest()
331 m2 = c2.manifest()
332 ma = ca.manifest()
332 ma = ca.manifest()
333
333
334 copy1, copy2 = {}, {}
334 copy1, copy2 = {}, {}
335 movewithdir1, movewithdir2 = {}, {}
335 movewithdir1, movewithdir2 = {}, {}
336 fullcopy1, fullcopy2 = {}, {}
336 fullcopy1, fullcopy2 = {}, {}
337 diverge = {}
337 diverge = {}
338
338
339 # find interesting file sets from manifests
339 # find interesting file sets from manifests
340 addedinm1 = m1.filesnotin(ma)
340 addedinm1 = m1.filesnotin(ma)
341 addedinm2 = m2.filesnotin(ma)
341 addedinm2 = m2.filesnotin(ma)
342 u1, u2 = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
342 u1, u2 = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
343 bothnew = sorted(addedinm1 & addedinm2)
343 bothnew = sorted(addedinm1 & addedinm2)
344
344
345 for f in u1:
345 for f in u1:
346 checkcopies(c1, f, m1, m2, ca, limit, diverge, copy1, fullcopy1)
346 checkcopies(c1, f, m1, m2, ca, limit, diverge, copy1, fullcopy1)
347
347
348 for f in u2:
348 for f in u2:
349 checkcopies(c2, f, m2, m1, ca, limit, diverge, copy2, fullcopy2)
349 checkcopies(c2, f, m2, m1, ca, limit, diverge, copy2, fullcopy2)
350
350
351 copy = dict(copy1.items() + copy2.items())
351 copy = dict(copy1.items() + copy2.items())
352 movewithdir = dict(movewithdir1.items() + movewithdir2.items())
352 movewithdir = dict(movewithdir1.items() + movewithdir2.items())
353 fullcopy = dict(fullcopy1.items() + fullcopy2.items())
353 fullcopy = dict(fullcopy1.items() + fullcopy2.items())
354
354
355 renamedelete = {}
355 renamedelete = {}
356 renamedeleteset = set()
356 renamedeleteset = set()
357 divergeset = set()
357 divergeset = set()
358 for of, fl in diverge.items():
358 for of, fl in diverge.items():
359 if len(fl) == 1 or of in c1 or of in c2:
359 if len(fl) == 1 or of in c1 or of in c2:
360 del diverge[of] # not actually divergent, or not a rename
360 del diverge[of] # not actually divergent, or not a rename
361 if of not in c1 and of not in c2:
361 if of not in c1 and of not in c2:
362 # renamed on one side, deleted on the other side, but filter
362 # renamed on one side, deleted on the other side, but filter
363 # out files that have been renamed and then deleted
363 # out files that have been renamed and then deleted
364 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
364 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
365 renamedeleteset.update(fl) # reverse map for below
365 renamedeleteset.update(fl) # reverse map for below
366 else:
366 else:
367 divergeset.update(fl) # reverse map for below
367 divergeset.update(fl) # reverse map for below
368
368
369 if bothnew:
369 if bothnew:
370 repo.ui.debug(" unmatched files new in both:\n %s\n"
370 repo.ui.debug(" unmatched files new in both:\n %s\n"
371 % "\n ".join(bothnew))
371 % "\n ".join(bothnew))
372 bothdiverge, _copy, _fullcopy = {}, {}, {}
372 bothdiverge, _copy, _fullcopy = {}, {}, {}
373 for f in bothnew:
373 for f in bothnew:
374 checkcopies(c1, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
374 checkcopies(c1, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
375 checkcopies(c2, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
375 checkcopies(c2, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
376 for of, fl in bothdiverge.items():
376 for of, fl in bothdiverge.items():
377 if len(fl) == 2 and fl[0] == fl[1]:
377 if len(fl) == 2 and fl[0] == fl[1]:
378 copy[fl[0]] = of # not actually divergent, just matching renames
378 copy[fl[0]] = of # not actually divergent, just matching renames
379
379
380 if fullcopy and repo.ui.debugflag:
380 if fullcopy and repo.ui.debugflag:
381 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
381 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
382 "% = renamed and deleted):\n")
382 "% = renamed and deleted):\n")
383 for f in sorted(fullcopy):
383 for f in sorted(fullcopy):
384 note = ""
384 note = ""
385 if f in copy:
385 if f in copy:
386 note += "*"
386 note += "*"
387 if f in divergeset:
387 if f in divergeset:
388 note += "!"
388 note += "!"
389 if f in renamedeleteset:
389 if f in renamedeleteset:
390 note += "%"
390 note += "%"
391 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
391 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
392 note))
392 note))
393 del divergeset
393 del divergeset
394
394
395 if not fullcopy:
395 if not fullcopy:
396 return copy, movewithdir, diverge, renamedelete
396 return copy, movewithdir, diverge, renamedelete
397
397
398 repo.ui.debug(" checking for directory renames\n")
398 repo.ui.debug(" checking for directory renames\n")
399
399
400 # generate a directory move map
400 # generate a directory move map
401 d1, d2 = c1.dirs(), c2.dirs()
401 d1, d2 = c1.dirs(), c2.dirs()
402 # Hack for adding '', which is not otherwise added, to d1 and d2
402 # Hack for adding '', which is not otherwise added, to d1 and d2
403 d1.addpath('/')
403 d1.addpath('/')
404 d2.addpath('/')
404 d2.addpath('/')
405 invalid = set()
405 invalid = set()
406 dirmove = {}
406 dirmove = {}
407
407
408 # examine each file copy for a potential directory move, which is
408 # examine each file copy for a potential directory move, which is
409 # when all the files in a directory are moved to a new directory
409 # when all the files in a directory are moved to a new directory
410 for dst, src in fullcopy.iteritems():
410 for dst, src in fullcopy.iteritems():
411 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
411 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
412 if dsrc in invalid:
412 if dsrc in invalid:
413 # already seen to be uninteresting
413 # already seen to be uninteresting
414 continue
414 continue
415 elif dsrc in d1 and ddst in d1:
415 elif dsrc in d1 and ddst in d1:
416 # directory wasn't entirely moved locally
416 # directory wasn't entirely moved locally
417 invalid.add(dsrc + "/")
417 invalid.add(dsrc + "/")
418 elif dsrc in d2 and ddst in d2:
418 elif dsrc in d2 and ddst in d2:
419 # directory wasn't entirely moved remotely
419 # directory wasn't entirely moved remotely
420 invalid.add(dsrc + "/")
420 invalid.add(dsrc + "/")
421 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
421 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
422 # files from the same directory moved to two different places
422 # files from the same directory moved to two different places
423 invalid.add(dsrc + "/")
423 invalid.add(dsrc + "/")
424 else:
424 else:
425 # looks good so far
425 # looks good so far
426 dirmove[dsrc + "/"] = ddst + "/"
426 dirmove[dsrc + "/"] = ddst + "/"
427
427
428 for i in invalid:
428 for i in invalid:
429 if i in dirmove:
429 if i in dirmove:
430 del dirmove[i]
430 del dirmove[i]
431 del d1, d2, invalid
431 del d1, d2, invalid
432
432
433 if not dirmove:
433 if not dirmove:
434 return copy, movewithdir, diverge, renamedelete
434 return copy, movewithdir, diverge, renamedelete
435
435
436 for d in dirmove:
436 for d in dirmove:
437 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
437 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
438 (d, dirmove[d]))
438 (d, dirmove[d]))
439
439
440 # check unaccounted nonoverlapping files against directory moves
440 # check unaccounted nonoverlapping files against directory moves
441 for f in u1 + u2:
441 for f in u1 + u2:
442 if f not in fullcopy:
442 if f not in fullcopy:
443 for d in dirmove:
443 for d in dirmove:
444 if f.startswith(d):
444 if f.startswith(d):
445 # new file added in a directory that was moved, move it
445 # new file added in a directory that was moved, move it
446 df = dirmove[d] + f[len(d):]
446 df = dirmove[d] + f[len(d):]
447 if df not in copy:
447 if df not in copy:
448 movewithdir[f] = df
448 movewithdir[f] = df
449 repo.ui.debug((" pending file src: '%s' -> "
449 repo.ui.debug((" pending file src: '%s' -> "
450 "dst: '%s'\n") % (f, df))
450 "dst: '%s'\n") % (f, df))
451 break
451 break
452
452
453 return copy, movewithdir, diverge, renamedelete
453 return copy, movewithdir, diverge, renamedelete
454
454
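A small worked example of the path rewrite performed at the end of the
directory-move scan above (the dirmove contents are hypothetical):

    dirmove = {'src/': 'lib/src/'}    # one discovered directory move
    f = 'src/new/module.py'           # file added with no copy record
    for d in dirmove:
        if f.startswith(d):
            df = dirmove[d] + f[len(d):]
            assert df == 'lib/src/new/module.py'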
455 def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
455 def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
456 """
456 """
457 check possible copies of f from m1 to m2
457 check possible copies of f from m1 to m2
458
458
459 ctx = starting context for f in m1
459 ctx = starting context for f in m1
460 f = the filename to check
460 f = the filename to check
461 m1 = the source manifest
461 m1 = the source manifest
462 m2 = the destination manifest
462 m2 = the destination manifest
463 ca = the changectx of the common ancestor
463 ca = the changectx of the common ancestor
464 limit = the rev number to not search beyond
464 limit = the rev number to not search beyond
465 diverge = record all diverges in this dict
465 diverge = record all diverges in this dict
466 copy = record all non-divergent copies in this dict
466 copy = record all non-divergent copies in this dict
467 fullcopy = record all copies in this dict
467 fullcopy = record all copies in this dict
468 """
468 """
469
469
470 ma = ca.manifest()
470 ma = ca.manifest()
471 getfctx = _makegetfctx(ctx)
471 getfctx = _makegetfctx(ctx)
472
472
473 def _related(f1, f2, limit):
473 def _related(f1, f2, limit):
474 # Walk back to common ancestor to see if the two files originate
474 # Walk back to common ancestor to see if the two files originate
475 # from the same file. Since workingfilectx's rev() is None it messes
475 # from the same file. Since workingfilectx's rev() is None it messes
476 # up the integer comparison logic, hence the pre-step check for
476 # up the integer comparison logic, hence the pre-step check for
477 # None (f1 and f2 can only be workingfilectx's initially).
477 # None (f1 and f2 can only be workingfilectx's initially).
478
478
479 if f1 == f2:
479 if f1 == f2:
480 return f1 # a match
480 return f1 # a match
481
481
482 g1, g2 = f1.ancestors(), f2.ancestors()
482 g1, g2 = f1.ancestors(), f2.ancestors()
483 try:
483 try:
484 f1r, f2r = f1.linkrev(), f2.linkrev()
484 f1r, f2r = f1.linkrev(), f2.linkrev()
485
485
486 if f1r is None:
486 if f1r is None:
487 f1 = g1.next()
487 f1 = next(g1)
488 if f2r is None:
488 if f2r is None:
489 f2 = g2.next()
489 f2 = next(g2)
490
490
491 while True:
491 while True:
492 f1r, f2r = f1.linkrev(), f2.linkrev()
492 f1r, f2r = f1.linkrev(), f2.linkrev()
493 if f1r > f2r:
493 if f1r > f2r:
494 f1 = g1.next()
494 f1 = next(g1)
495 elif f2r > f1r:
495 elif f2r > f1r:
496 f2 = g2.next()
496 f2 = next(g2)
497 elif f1 == f2:
497 elif f1 == f2:
498 return f1 # a match
498 return f1 # a match
499 elif f1r == f2r or f1r < limit or f2r < limit:
499 elif f1r == f2r or f1r < limit or f2r < limit:
500 return False # copy no longer relevant
500 return False # copy no longer relevant
501 except StopIteration:
501 except StopIteration:
502 return False
502 return False
503
503
504 of = None
504 of = None
505 seen = set([f])
505 seen = set([f])
506 for oc in getfctx(f, m1[f]).ancestors():
506 for oc in getfctx(f, m1[f]).ancestors():
507 ocr = oc.linkrev()
507 ocr = oc.linkrev()
508 of = oc.path()
508 of = oc.path()
509 if of in seen:
509 if of in seen:
510 # check limit late - grab last rename before
510 # check limit late - grab last rename before
511 if ocr < limit:
511 if ocr < limit:
512 break
512 break
513 continue
513 continue
514 seen.add(of)
514 seen.add(of)
515
515
516 fullcopy[f] = of # remember for dir rename detection
516 fullcopy[f] = of # remember for dir rename detection
517 if of not in m2:
517 if of not in m2:
518 continue # no match, keep looking
518 continue # no match, keep looking
519 if m2[of] == ma.get(of):
519 if m2[of] == ma.get(of):
520 break # no merge needed, quit early
520 break # no merge needed, quit early
521 c2 = getfctx(of, m2[of])
521 c2 = getfctx(of, m2[of])
522 cr = _related(oc, c2, ca.rev())
522 cr = _related(oc, c2, ca.rev())
523 if cr and (of == f or of == c2.path()): # non-divergent
523 if cr and (of == f or of == c2.path()): # non-divergent
524 copy[f] = of
524 copy[f] = of
525 of = None
525 of = None
526 break
526 break
527
527
528 if of in ma:
528 if of in ma:
529 diverge.setdefault(of, []).append(f)
529 diverge.setdefault(of, []).append(f)
530
530
531 def duplicatecopies(repo, rev, fromrev, skiprev=None):
531 def duplicatecopies(repo, rev, fromrev, skiprev=None):
532 '''reproduce copies from fromrev to rev in the dirstate
532 '''reproduce copies from fromrev to rev in the dirstate
533
533
534 If skiprev is specified, it's a revision that should be used to
534 If skiprev is specified, it's a revision that should be used to
535 filter copy records. Any copies that occur between fromrev and
535 filter copy records. Any copies that occur between fromrev and
536 skiprev will not be duplicated, even if they appear in the set of
536 skiprev will not be duplicated, even if they appear in the set of
537 copies between fromrev and rev.
537 copies between fromrev and rev.
538 '''
538 '''
539 exclude = {}
539 exclude = {}
540 if (skiprev is not None and
540 if (skiprev is not None and
541 not repo.ui.configbool('experimental', 'disablecopytrace')):
541 not repo.ui.configbool('experimental', 'disablecopytrace')):
542 # disablecopytrace skips this line, but not the entire function because
542 # disablecopytrace skips this line, but not the entire function because
543 # the line below is O(size of the repo) during a rebase, while the rest
543 # the line below is O(size of the repo) during a rebase, while the rest
544 # of the function is much faster (and is required for carrying copy
544 # of the function is much faster (and is required for carrying copy
545 # metadata across the rebase anyway).
545 # metadata across the rebase anyway).
546 exclude = pathcopies(repo[fromrev], repo[skiprev])
546 exclude = pathcopies(repo[fromrev], repo[skiprev])
547 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
547 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
548 # copies.pathcopies returns backward renames, so dst might not
548 # copies.pathcopies returns backward renames, so dst might not
549 # actually be in the dirstate
549 # actually be in the dirstate
550 if dst in exclude:
550 if dst in exclude:
551 continue
551 continue
552 if repo.dirstate[dst] in "nma":
552 if repo.dirstate[dst] in "nma":
553 repo.dirstate.copy(src, dst)
553 repo.dirstate.copy(src, dst)
@@ -1,1304 +1,1304 @@
1 #
1 #
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import cgi
10 import cgi
11 import copy
11 import copy
12 import mimetypes
12 import mimetypes
13 import os
13 import os
14 import re
14 import re
15
15
16 from ..i18n import _
16 from ..i18n import _
17 from ..node import hex, short
17 from ..node import hex, short
18
18
19 from .common import (
19 from .common import (
20 ErrorResponse,
20 ErrorResponse,
21 HTTP_FORBIDDEN,
21 HTTP_FORBIDDEN,
22 HTTP_NOT_FOUND,
22 HTTP_NOT_FOUND,
23 HTTP_OK,
23 HTTP_OK,
24 get_contact,
24 get_contact,
25 paritygen,
25 paritygen,
26 staticfile,
26 staticfile,
27 )
27 )
28
28
29 from .. import (
29 from .. import (
30 archival,
30 archival,
31 encoding,
31 encoding,
32 error,
32 error,
33 graphmod,
33 graphmod,
34 patch,
34 patch,
35 revset,
35 revset,
36 scmutil,
36 scmutil,
37 templatefilters,
37 templatefilters,
38 templater,
38 templater,
39 util,
39 util,
40 )
40 )
41
41
42 from . import (
42 from . import (
43 webutil,
43 webutil,
44 )
44 )
45
45
46 __all__ = []
46 __all__ = []
47 commands = {}
47 commands = {}
48
48
49 class webcommand(object):
49 class webcommand(object):
50 """Decorator used to register a web command handler.
50 """Decorator used to register a web command handler.
51
51
52 The decorator takes as its positional argument the name/path the
52 The decorator takes as its positional argument the name/path the
53 command should be accessible under.
53 command should be accessible under.
54
54
55 Usage:
55 Usage:
56
56
57 @webcommand('mycommand')
57 @webcommand('mycommand')
58 def mycommand(web, req, tmpl):
58 def mycommand(web, req, tmpl):
59 pass
59 pass
60 """
60 """
61
61
62 def __init__(self, name):
62 def __init__(self, name):
63 self.name = name
63 self.name = name
64
64
65 def __call__(self, func):
65 def __call__(self, func):
66 __all__.append(self.name)
66 __all__.append(self.name)
67 commands[self.name] = func
67 commands[self.name] = func
68 return func
68 return func
69
69
70 @webcommand('log')
70 @webcommand('log')
71 def log(web, req, tmpl):
71 def log(web, req, tmpl):
72 """
72 """
73 /log[/{revision}[/{path}]]
73 /log[/{revision}[/{path}]]
74 --------------------------
74 --------------------------
75
75
76 Show repository or file history.
76 Show repository or file history.
77
77
78 For URLs of the form ``/log/{revision}``, a list of changesets starting at
78 For URLs of the form ``/log/{revision}``, a list of changesets starting at
79 the specified changeset identifier is shown. If ``{revision}`` is not
79 the specified changeset identifier is shown. If ``{revision}`` is not
80 defined, the default is ``tip``. This form is equivalent to the
80 defined, the default is ``tip``. This form is equivalent to the
81 ``changelog`` handler.
81 ``changelog`` handler.
82
82
83 For URLs of the form ``/log/{revision}/{file}``, the history for a specific
83 For URLs of the form ``/log/{revision}/{file}``, the history for a specific
84 file will be shown. This form is equivalent to the ``filelog`` handler.
84 file will be shown. This form is equivalent to the ``filelog`` handler.
85 """
85 """
86
86
87 if 'file' in req.form and req.form['file'][0]:
87 if 'file' in req.form and req.form['file'][0]:
88 return filelog(web, req, tmpl)
88 return filelog(web, req, tmpl)
89 else:
89 else:
90 return changelog(web, req, tmpl)
90 return changelog(web, req, tmpl)
91
91
92 @webcommand('rawfile')
92 @webcommand('rawfile')
93 def rawfile(web, req, tmpl):
93 def rawfile(web, req, tmpl):
94 guessmime = web.configbool('web', 'guessmime', False)
94 guessmime = web.configbool('web', 'guessmime', False)
95
95
96 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
96 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
97 if not path:
97 if not path:
98 content = manifest(web, req, tmpl)
98 content = manifest(web, req, tmpl)
99 req.respond(HTTP_OK, web.ctype)
99 req.respond(HTTP_OK, web.ctype)
100 return content
100 return content
101
101
102 try:
102 try:
103 fctx = webutil.filectx(web.repo, req)
103 fctx = webutil.filectx(web.repo, req)
104 except error.LookupError as inst:
104 except error.LookupError as inst:
105 try:
105 try:
106 content = manifest(web, req, tmpl)
106 content = manifest(web, req, tmpl)
107 req.respond(HTTP_OK, web.ctype)
107 req.respond(HTTP_OK, web.ctype)
108 return content
108 return content
109 except ErrorResponse:
109 except ErrorResponse:
110 raise inst
110 raise inst
111
111
112 path = fctx.path()
112 path = fctx.path()
113 text = fctx.data()
113 text = fctx.data()
114 mt = 'application/binary'
114 mt = 'application/binary'
115 if guessmime:
115 if guessmime:
116 mt = mimetypes.guess_type(path)[0]
116 mt = mimetypes.guess_type(path)[0]
117 if mt is None:
117 if mt is None:
118 if util.binary(text):
118 if util.binary(text):
119 mt = 'application/binary'
119 mt = 'application/binary'
120 else:
120 else:
121 mt = 'text/plain'
121 mt = 'text/plain'
122 if mt.startswith('text/'):
122 if mt.startswith('text/'):
123 mt += '; charset="%s"' % encoding.encoding
123 mt += '; charset="%s"' % encoding.encoding
124
124
125 req.respond(HTTP_OK, mt, path, body=text)
125 req.respond(HTTP_OK, mt, path, body=text)
126 return []
126 return []
127
127
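For reference, the standard-library call used above behaves like this
(a sketch; the file names are hypothetical):

    import mimetypes
    assert mimetypes.guess_type('README.txt')[0] == 'text/plain'
    # unknown extensions yield None, triggering the binary/plain probing above
    assert mimetypes.guess_type('file.xyz')[0] is None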
128 def _filerevision(web, req, tmpl, fctx):
128 def _filerevision(web, req, tmpl, fctx):
129 f = fctx.path()
129 f = fctx.path()
130 text = fctx.data()
130 text = fctx.data()
131 parity = paritygen(web.stripecount)
131 parity = paritygen(web.stripecount)
132
132
133 if util.binary(text):
133 if util.binary(text):
134 mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
134 mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
135 text = '(binary:%s)' % mt
135 text = '(binary:%s)' % mt
136
136
137 def lines():
137 def lines():
138 for lineno, t in enumerate(text.splitlines(True)):
138 for lineno, t in enumerate(text.splitlines(True)):
139 yield {"line": t,
139 yield {"line": t,
140 "lineid": "l%d" % (lineno + 1),
140 "lineid": "l%d" % (lineno + 1),
141 "linenumber": "% 6d" % (lineno + 1),
141 "linenumber": "% 6d" % (lineno + 1),
142 "parity": parity.next()}
142 "parity": next(parity)}
143
143
144 return tmpl("filerevision",
144 return tmpl("filerevision",
145 file=f,
145 file=f,
146 path=webutil.up(f),
146 path=webutil.up(f),
147 text=lines(),
147 text=lines(),
148 symrev=webutil.symrevorshortnode(req, fctx),
148 symrev=webutil.symrevorshortnode(req, fctx),
149 rename=webutil.renamelink(fctx),
149 rename=webutil.renamelink(fctx),
150 permissions=fctx.manifest().flags(f),
150 permissions=fctx.manifest().flags(f),
151 **webutil.commonentry(web.repo, fctx))
151 **webutil.commonentry(web.repo, fctx))
152
152
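The parity values consumed with next() above come from paritygen in
hgweb.common, which flips between 0 and 1 every stripecount rows; with a
stripecount of 1 it behaves like this sketch:

    import itertools
    parity = itertools.cycle([0, 1])
    assert next(parity) == 0
    assert next(parity) == 1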
153 @webcommand('file')
153 @webcommand('file')
154 def file(web, req, tmpl):
154 def file(web, req, tmpl):
155 """
155 """
156 /file/{revision}[/{path}]
156 /file/{revision}[/{path}]
157 -------------------------
157 -------------------------
158
158
159 Show information about a directory or file in the repository.
159 Show information about a directory or file in the repository.
160
160
161 Info about the ``path`` given as a URL parameter will be rendered.
161 Info about the ``path`` given as a URL parameter will be rendered.
162
162
163 If ``path`` is a directory, information about the entries in that
163 If ``path`` is a directory, information about the entries in that
164 directory will be rendered. This form is equivalent to the ``manifest``
164 directory will be rendered. This form is equivalent to the ``manifest``
165 handler.
165 handler.
166
166
167 If ``path`` is a file, information about that file will be shown via
167 If ``path`` is a file, information about that file will be shown via
168 the ``filerevision`` template.
168 the ``filerevision`` template.
169
169
170 If ``path`` is not defined, information about the root directory will
170 If ``path`` is not defined, information about the root directory will
171 be rendered.
171 be rendered.
172 """
172 """
173 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
173 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
174 if not path:
174 if not path:
175 return manifest(web, req, tmpl)
175 return manifest(web, req, tmpl)
176 try:
176 try:
177 return _filerevision(web, req, tmpl, webutil.filectx(web.repo, req))
177 return _filerevision(web, req, tmpl, webutil.filectx(web.repo, req))
178 except error.LookupError as inst:
178 except error.LookupError as inst:
179 try:
179 try:
180 return manifest(web, req, tmpl)
180 return manifest(web, req, tmpl)
181 except ErrorResponse:
181 except ErrorResponse:
182 raise inst
182 raise inst
183
183
def _search(web, req, tmpl):
    MODE_REVISION = 'rev'
    MODE_KEYWORD = 'keyword'
    MODE_REVSET = 'revset'

    def revsearch(ctx):
        yield ctx

    def keywordsearch(query):
        lower = encoding.lower
        qw = lower(query).split()

        def revgen():
            cl = web.repo.changelog
            for i in xrange(len(web.repo) - 1, 0, -100):
                l = []
                for j in cl.revs(max(0, i - 99), i):
                    ctx = web.repo[j]
                    l.append(ctx)
                l.reverse()
                for e in l:
                    yield e

        for ctx in revgen():
            miss = 0
            for q in qw:
                if not (q in lower(ctx.user()) or
                        q in lower(ctx.description()) or
                        q in lower(" ".join(ctx.files()))):
                    miss = 1
                    break
            if miss:
                continue

            yield ctx

    def revsetsearch(revs):
        for r in revs:
            yield web.repo[r]

    searchfuncs = {
        MODE_REVISION: (revsearch, 'exact revision search'),
        MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
        MODE_REVSET: (revsetsearch, 'revset expression search'),
    }

    def getsearchmode(query):
        try:
            ctx = web.repo[query]
        except (error.RepoError, error.LookupError):
            # query is not an exact revision pointer, need to
            # decide if it's a revset expression or keywords
            pass
        else:
            return MODE_REVISION, ctx

        revdef = 'reverse(%s)' % query
        try:
            tree = revset.parse(revdef)
        except error.ParseError:
            # can't parse to a revset tree
            return MODE_KEYWORD, query

        if revset.depth(tree) <= 2:
            # no revset syntax used
            return MODE_KEYWORD, query

        if any((token, (value or '')[:3]) == ('string', 're:')
               for token, value, pos in revset.tokenize(revdef)):
            return MODE_KEYWORD, query

        funcsused = revset.funcsused(tree)
        if not funcsused.issubset(revset.safesymbols):
            return MODE_KEYWORD, query

        mfunc = revset.match(web.repo.ui, revdef)
        try:
            revs = mfunc(web.repo)
            return MODE_REVSET, revs
        # ParseError: wrongly placed tokens, wrong arguments, etc.
        # RepoLookupError: no such revision, e.g. in 'revision:'
        # Abort: bookmark/tag does not exist
        # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
        except (error.ParseError, error.RepoLookupError, error.Abort,
                LookupError):
            return MODE_KEYWORD, query

    def changelist(**map):
        count = 0

        for ctx in searchfunc[0](funcarg):
            count += 1
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            yield tmpl('searchentry',
                       parity=next(parity),
                       changelogtag=showtags,
                       files=files,
                       **webutil.commonentry(web.repo, ctx))

            if count >= revcount:
                break

    query = req.form['rev'][0]
    revcount = web.maxchanges
    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    lessvars['rev'] = query
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2
    morevars['rev'] = query

    mode, funcarg = getsearchmode(query)

    if 'forcekw' in req.form:
        showforcekw = ''
        showunforcekw = searchfuncs[mode][1]
        mode = MODE_KEYWORD
        funcarg = query
    else:
        if mode != MODE_KEYWORD:
            showforcekw = searchfuncs[MODE_KEYWORD][1]
        else:
            showforcekw = ''
        showunforcekw = ''

    searchfunc = searchfuncs[mode]

    tip = web.repo['tip']
    parity = paritygen(web.stripecount)

    return tmpl('search', query=query, node=tip.hex(), symrev='tip',
                entries=changelist, archives=web.archivelist("tip"),
                morevars=morevars, lessvars=lessvars,
                modedesc=searchfunc[1],
                showforcekw=showforcekw, showunforcekw=showunforcekw)

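# A minimal sketch of the conversion this patch performs (illustrative
# only): Python 2 generators expose a .next() method, which Python 3
# renamed to __next__(); the next() builtin, available since Python 2.6,
# works on both, so the parity generators used throughout this module
# are drained portably:
#
#   parity = paritygen(web.stripecount)
#   parity.next()   # Python 2 only; AttributeError on Python 3
#   next(parity)    # works on Python 2.6+ and Python 3
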
@webcommand('changelog')
def changelog(web, req, tmpl, shortlog=False):
    """
    /changelog[/{revision}]
    -----------------------

    Show information about multiple changesets.

    If the optional ``revision`` URL argument is absent, information about
    all changesets starting at ``tip`` will be rendered. If the ``revision``
    argument is present, changesets will be shown starting from the specified
    revision.

    If ``revision`` is absent, the ``rev`` query string argument may be
    defined. This will perform a search for changesets.

    The argument for ``rev`` can be a single revision, a revision set,
    or a literal keyword to search for in changeset data (equivalent to
    :hg:`log -k`).

    The ``revcount`` query string argument defines the maximum number of
    changesets to render.

    For non-searches, the ``changelog`` template will be rendered.
    """

    query = ''
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    elif 'rev' in req.form:
        return _search(web, req, tmpl)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'

    def changelist():
        revs = []
        if pos != -1:
            revs = web.repo.changelog.revs(pos, 0)
        curcount = 0
        for rev in revs:
            curcount += 1
            if curcount > revcount + 1:
                break

            entry = webutil.changelistentry(web, web.repo[rev], tmpl)
            entry['parity'] = next(parity)
            yield entry

    if shortlog:
        revcount = web.maxshortchanges
    else:
        revcount = web.maxchanges

    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = len(web.repo)
    pos = ctx.rev()
    parity = paritygen(web.stripecount)

    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    entries = list(changelist())
    latestentry = entries[:1]
    if len(entries) > revcount:
        nextentry = entries[-1:]
        entries = entries[:-1]
    else:
        nextentry = []

    return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
                node=ctx.hex(), rev=pos, symrev=symrev, changesets=count,
                entries=entries,
                latestentry=latestentry, nextentry=nextentry,
                archives=web.archivelist("tip"), revcount=revcount,
                morevars=morevars, lessvars=lessvars, query=query)

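# Sketch of the parity generator used throughout this module (assuming
# the stock paritygen from mercurial.hgweb.common; illustrative only):
# it yields 0/1 in stripes of ``stripecount`` so templates can alternate
# row shading, e.g. with stripecount=2:
#
#   parity = paritygen(2)
#   [next(parity) for _ in range(6)]   # -> [0, 0, 1, 1, 0, 0]
#
# The optional ``offset`` argument (see changelist() inside summary()
# below) keeps the stripes aligned when a list is built in reverse.
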
@webcommand('shortlog')
def shortlog(web, req, tmpl):
    """
    /shortlog
    ---------

    Show basic information about a set of changesets.

    This accepts the same parameters as the ``changelog`` handler. The only
    difference is the ``shortlog`` template will be rendered instead of the
    ``changelog`` template.
    """
    return changelog(web, req, tmpl, shortlog=True)

@webcommand('changeset')
def changeset(web, req, tmpl):
    """
    /changeset[/{revision}]
    -----------------------

    Show information about a single changeset.

    A URL path argument is the changeset identifier to show. See ``hg help
    revisions`` for possible values. If not defined, the ``tip`` changeset
    will be shown.

    The ``changeset`` template is rendered. Contents of the ``changesettag``,
    ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
    templates related to diffs may all be used to produce the output.
    """
    ctx = webutil.changectx(web.repo, req)

    return tmpl('changeset', **webutil.changesetentry(web, req, tmpl, ctx))

rev = webcommand('rev')(changeset)

def decodepath(path):
    """Hook for mapping a path in the repository to a path in the
    working copy.

    Extensions (e.g., largefiles) can override this to remap files in
    the virtual file system presented by the manifest command below."""
    return path

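# Hypothetical override sketch (illustrative only; the real largefiles
# hook differs in detail): an extension can replace decodepath to expose
# its internal store paths under their working-copy names, e.g.:
#
#   from mercurial.hgweb import webcommands
#
#   def _decodepath(path):
#       # strip a hypothetical '.hglf/' standin prefix for display
#       if path.startswith('.hglf/'):
#           return path[len('.hglf/'):]
#       return path
#
#   webcommands.decodepath = _decodepath
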
@webcommand('manifest')
def manifest(web, req, tmpl):
    """
    /manifest[/{revision}[/{path}]]
    -------------------------------

    Show information about a directory.

    If the URL path arguments are omitted, information about the root
    directory for the ``tip`` changeset will be shown.

    Because this handler can only show information for directories, it
    is recommended to use the ``file`` handler instead, as it can handle both
    directories and files.

    The ``manifest`` template will be rendered for this handler.
    """
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            files[remain] = full
        else:
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        for f in sorted(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": next(parity),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        for d in sorted(dirs):

            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k, v = h.items()[0]
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": next(parity),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return tmpl("manifest",
                symrev=symrev,
                path=abspath,
                up=webutil.up(abspath),
                upparity=next(parity),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                **webutil.commonentry(web.repo, ctx))

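# Worked example of the directory-tree construction above (illustrative;
# file names are made up): for a manifest containing 'a.txt', 'lib/x.py'
# and 'lib/sub/y.py' with path == '', the loop leaves
#
#   files == {'a.txt': 'a.txt'}
#   dirs  == {'lib': {None: None, 'sub': {None: None}}}
#
# The ``None`` keys mark directories that directly contain files; they
# keep len(h) >= 2 for such directories, which stops dirlist() from
# collapsing them into their parent, and the ``if v:`` test skips the
# sentinel when building the collapsed "emptydirs" chain.
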
@webcommand('tags')
def tags(web, req, tmpl):
    """
    /tags
    -----

    Show information about tags.

    No arguments are accepted.

    The ``tags`` template is rendered.
    """
    i = list(reversed(web.repo.tagslist()))
    parity = paritygen(web.stripecount)

    def entries(notip, latestonly, **map):
        t = i
        if notip:
            t = [(k, n) for k, n in i if k != "tip"]
        if latestonly:
            t = t[:1]
        for k, n in t:
            yield {"parity": next(parity),
                   "tag": k,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    return tmpl("tags",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(False, False, **x),
                entriesnotip=lambda **x: entries(True, False, **x),
                latestentry=lambda **x: entries(True, True, **x))

@webcommand('bookmarks')
def bookmarks(web, req, tmpl):
    """
    /bookmarks
    ----------

    Show information about bookmarks.

    No arguments are accepted.

    The ``bookmarks`` template is rendered.
    """
    i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
    sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
    i = sorted(i, key=sortkey, reverse=True)
    parity = paritygen(web.stripecount)

    def entries(latestonly, **map):
        t = i
        if latestonly:
            t = i[:1]
        for k, n in t:
            yield {"parity": next(parity),
                   "bookmark": k,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    if i:
        latestrev = i[0][1]
    else:
        latestrev = -1

    return tmpl("bookmarks",
                node=hex(web.repo.changelog.tip()),
                lastchange=[{"date": web.repo[latestrev].date()}],
                entries=lambda **x: entries(latestonly=False, **x),
                latestentry=lambda **x: entries(latestonly=True, **x))

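# Illustrative example of the bookmark ordering above, with toy
# (name, rev) tuples standing in for the real (name, node) pairs:
# sortkey sorts by (revision, name) descending, so newer bookmarks come
# first and ties on the same changeset fall back to reverse-alphabetical
# name order:
#
#   marks = [('old', 2), ('default', 5), ('stable', 5)]
#   sorted(marks, key=lambda b: (b[1], b[0]), reverse=True)
#   # -> [('stable', 5), ('default', 5), ('old', 2)]
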
@webcommand('branches')
def branches(web, req, tmpl):
    """
    /branches
    ---------

    Show information about branches.

    All known branches are contained in the output, even closed branches.

    No arguments are accepted.

    The ``branches`` template is rendered.
    """
    entries = webutil.branchentries(web.repo, web.stripecount)
    latestentry = webutil.branchentries(web.repo, web.stripecount, 1)
    return tmpl('branches', node=hex(web.repo.changelog.tip()),
                entries=entries, latestentry=latestentry)

@webcommand('summary')
def summary(web, req, tmpl):
    """
    /summary
    --------

    Show a summary of repository state.

    Information about the latest changesets, bookmarks, tags, and branches
    is captured by this handler.

    The ``summary`` template is rendered.
    """
    i = reversed(web.repo.tagslist())

    def tagentries(**map):
        parity = paritygen(web.stripecount)
        count = 0
        for k, n in i:
            if k == "tip": # skip tip
                continue

            count += 1
            if count > 10: # limit to 10 tags
                break

            yield tmpl("tagentry",
                       parity=next(parity),
                       tag=k,
                       node=hex(n),
                       date=web.repo[n].date())

    def bookmarks(**map):
        parity = paritygen(web.stripecount)
        marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
        sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
        marks = sorted(marks, key=sortkey, reverse=True)
        for k, n in marks[:10]: # limit to 10 bookmarks
            yield {'parity': next(parity),
                   'bookmark': k,
                   'date': web.repo[n].date(),
                   'node': hex(n)}

    def changelist(**map):
        parity = paritygen(web.stripecount, offset=start - end)
        l = [] # build a list in forward order for efficiency
        revs = []
        if start < end:
            revs = web.repo.changelog.revs(start, end - 1)
        for i in revs:
            ctx = web.repo[i]

            l.append(tmpl(
                'shortlogentry',
                parity=next(parity),
                **webutil.commonentry(web.repo, ctx)))

        l.reverse()
        yield l

    tip = web.repo['tip']
    count = len(web.repo)
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    return tmpl("summary",
                desc=web.config("web", "description", "unknown"),
                owner=get_contact(web.config) or "unknown",
                lastchange=tip.date(),
                tags=tagentries,
                bookmarks=bookmarks,
                branches=webutil.branchentries(web.repo, web.stripecount, 10),
                shortlog=changelist,
                node=tip.hex(),
                symrev='tip',
                archives=web.archivelist("tip"))

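# Note on changelist() above (an inference from the stock paritygen, not
# stated in this file): entries are appended oldest-first and the list
# reversed before rendering, so the generator is created with
# offset=start - end to pre-advance its stripe phase; after the reverse,
# each row keeps the shading it would have had if the entries had been
# generated newest-first.
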
@webcommand('filediff')
def filediff(web, req, tmpl):
    """
    /diff/{revision}/{path}
    -----------------------

    Show how a file changed in a particular commit.

    The ``filediff`` template is rendered.

    This handler is registered under both the ``/diff`` and ``/filediff``
    paths. ``/diff`` is used in modern code.
    """
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, req)
    except LookupError:
        ctx = webutil.changectx(web.repo, req)
        path = webutil.cleanpath(web.repo, req.form['file'][0])
        if path not in ctx.files():
            raise

    if fctx is not None:
        path = fctx.path()
        ctx = fctx.changectx()

    parity = paritygen(web.stripecount)
    style = web.config('web', 'style', 'paper')
    if 'style' in req.form:
        style = req.form['style'][0]

    diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = []
        ctx = ctx
    return tmpl("filediff",
                file=path,
                symrev=webutil.symrevorshortnode(req, ctx),
                rename=rename,
                diff=diffs,
                **webutil.commonentry(web.repo, ctx))

diff = webcommand('diff')(filediff)

@webcommand('comparison')
def comparison(web, req, tmpl):
    """
    /comparison/{revision}/{path}
    -----------------------------

    Show a comparison between the old and new versions of a file from changes
    made on a particular revision.

    This is similar to the ``diff`` handler. However, this form features
    a split or side-by-side diff rather than a unified diff.

    The ``context`` query string argument can be used to control the lines of
    context in the diff.

    The ``filecomparison`` template is rendered.
    """
    ctx = webutil.changectx(web.repo, req)
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, req.form['file'][0])

    parsecontext = lambda v: v == 'full' and -1 or int(v)
    if 'context' in req.form:
        context = parsecontext(req.form['context'][0])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        if util.binary(f.data()):
            mt = mimetypes.guess_type(f.path())[0]
            if not mt:
                mt = 'application/octet-stream'
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    fctx = None
    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = ctx.node()
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            leftlines = ()
        else:
            pfctx = parent[path]
            leftlines = filelines(pfctx)
    else:
        rightlines = ()
        pfctx = ctx.parents()[0][path]
        leftlines = filelines(pfctx)

    comparison = webutil.compare(tmpl, context, leftlines, rightlines)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = []
        ctx = ctx
    return tmpl('filecomparison',
                file=path,
                symrev=webutil.symrevorshortnode(req, ctx),
                rename=rename,
                leftrev=leftrev,
                leftnode=hex(leftnode),
                rightrev=rightrev,
                rightnode=hex(rightnode),
                comparison=comparison,
                **webutil.commonentry(web.repo, ctx))

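# The parsecontext lambda above uses the old Python and/or conditional
# idiom (safe here because -1 is truthy); an equivalent, more explicit
# spelling would be (illustrative only):
#
#   def parsecontext(v):
#       # 'full' means unlimited context, signalled downstream by -1
#       return -1 if v == 'full' else int(v)
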
@webcommand('annotate')
def annotate(web, req, tmpl):
    """
    /annotate/{revision}/{path}
    ---------------------------

    Show changeset information for each line in a file.

    The ``fileannotate`` template is rendered.
    """
    fctx = webutil.filectx(web.repo, req)
    f = fctx.path()
    parity = paritygen(web.stripecount)
    diffopts = patch.difffeatureopts(web.repo.ui, untrusted=True,
                                     section='annotate', whitespace=True)

    def annotate(**map):
        last = None
        if util.binary(fctx.data()):
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
                                '(binary:%s)' % mt)])
        else:
            lines = enumerate(fctx.annotate(follow=True, linenumber=True,
                                            diffopts=diffopts))
        for lineno, ((f, targetline), l) in lines:
            fnode = f.filenode()

            if last != fnode:
                last = fnode

            yield {"parity": next(parity),
                   "node": f.hex(),
                   "rev": f.rev(),
                   "author": f.user(),
                   "desc": f.description(),
                   "extra": f.extra(),
                   "file": f.path(),
                   "targetline": targetline,
                   "line": l,
                   "lineno": lineno + 1,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "revdate": f.date()}

    return tmpl("fileannotate",
                file=f,
                annotate=annotate,
                path=webutil.up(f),
                symrev=webutil.symrevorshortnode(req, fctx),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f),
                **webutil.commonentry(web.repo, fctx))

@webcommand('filelog')
def filelog(web, req, tmpl):
    """
    /filelog/{revision}/{path}
    --------------------------

    Show information about the history of a file in the repository.

    The ``revcount`` query string argument can be defined to control the
    maximum number of entries to show.

    The ``filelog`` template will be rendered.
    """

    try:
        fctx = webutil.filectx(web.repo, req)
        f = fctx.path()
        fl = fctx.filelog()
    except error.LookupError:
        f = webutil.cleanpath(web.repo, req.form['file'][0])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        frev = numrevs - 1
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    revcount = web.maxshortchanges
    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = fctx.filerev() + 1
    start = max(0, fctx.filerev() - revcount + 1) # first rev on this page
    end = min(count, start + revcount) # last rev on this page
    parity = paritygen(web.stripecount, offset=start - end)

    def entries():
        l = []

        repo = web.repo
        revs = fctx.filelog().revs(start, end - 1)
        for i in revs:
            iterfctx = fctx.filectx(i)

            l.append(dict(
                parity=next(parity),
                filerev=i,
                file=f,
                rename=webutil.renamelink(iterfctx),
                **webutil.commonentry(repo, iterfctx)))
        for e in reversed(l):
            yield e

    entries = list(entries())
    latestentry = entries[:1]

    revnav = webutil.filerevnav(web.repo, fctx.path())
    nav = revnav.gen(end - 1, revcount, count)
    return tmpl("filelog",
                file=f,
                nav=nav,
                symrev=webutil.symrevorshortnode(req, fctx),
                entries=entries,
                latestentry=latestentry,
                revcount=revcount,
                morevars=morevars,
                lessvars=lessvars,
                **webutil.commonentry(web.repo, fctx))

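# Illustrative sketch of the linkrev walk in the except branch above:
# filelog revisions are numbered independently of changelog revisions,
# and fl.linkrev(frev) maps a file revision back to the changeset that
# introduced it. Scanning frev downward finds the newest file revision
# that already existed at changeset ``rev``. For example, with
#
#   linkrevs = [2, 7, 11]   # hypothetical fl.linkrev() per file rev
#   rev = 9                 # requested changeset
#
# the loop stops at frev == 1 (linkrev 7), the latest file revision
# whose introducing changeset is <= rev 9.
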
@webcommand('archive')
def archive(web, req, tmpl):
    """
    /archive/{revision}.{format}[/{path}]
    -------------------------------------

    Obtain an archive of repository content.

    The content and type of the archive are defined by a URL path parameter.
    ``format`` is the file extension of the archive type to be generated,
    e.g. ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
    server configuration.

    The optional ``path`` URL parameter controls content to include in the
    archive. If omitted, every file in the specified revision is present in the
    archive. If included, only the specified file or contents of the specified
    directory will be included in the archive.

    No template is used for this handler. Raw, binary content is generated.
    """

    type_ = req.form.get('type', [None])[0]
    allowed = web.configlist("web", "allow_archive")
    key = req.form['node'][0]

    if type_ not in web.archives:
        msg = 'Unsupported archive type: %s' % type_
        raise ErrorResponse(HTTP_NOT_FOUND, msg)

    if not ((type_ in allowed or
             web.configbool("web", "allow" + type_, False))):
        msg = 'Archive type not allowed: %s' % type_
        raise ErrorResponse(HTTP_FORBIDDEN, msg)

    reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
    cnode = web.repo.lookup(key)
    arch_version = key
    if cnode == key or key == 'tip':
        arch_version = short(cnode)
    name = "%s-%s" % (reponame, arch_version)

    ctx = webutil.changectx(web.repo, req)
    pats = []
    matchfn = scmutil.match(ctx, [])
    file = req.form.get('file', None)
    if file:
        pats = ['path:' + file[0]]
        matchfn = scmutil.match(ctx, pats, default='path')
        if pats:
            files = [f for f in ctx.manifest().keys() if matchfn(f)]
            if not files:
                raise ErrorResponse(HTTP_NOT_FOUND,
                                    'file(s) not found: %s' % file[0])

    mimetype, artype, extension, encoding = web.archivespecs[type_]
    headers = [
        ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
    ]
    if encoding:
        headers.append(('Content-Encoding', encoding))
    req.headers.extend(headers)
    req.respond(HTTP_OK, mimetype)

    archival.archive(web.repo, req, cnode, artype, prefix=name,
                     matchfn=matchfn,
                     subrepos=web.configbool("web", "archivesubrepos"))
    return []


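# Example server configuration enabling archive downloads (illustrative
# hgrc snippet; see `hg help config` for the authoritative web options):
#
#   [web]
#   allow_archive = gz, zip, bz2
#
# With that in place, /archive/tip.zip serves a zip of the tip revision,
# and /archive/tip.tar.gz/some/subdir (the optional ``path`` parameter)
# restricts the archive to that directory.
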
@webcommand('static')
def static(web, req, tmpl):
    fname = req.form['file'][0]
    # a repo owner may set web.static in .hg/hgrc to get any file
    # readable by the user running the CGI script
    static = web.config("web", "static", None, untrusted=False)
    if not static:
        tp = web.templatepath or templater.templatepaths()
        if isinstance(tp, str):
            tp = [tp]
        static = [os.path.join(p, 'static') for p in tp]
    staticfile(static, fname, req)
    return []

@webcommand('graph')
def graph(web, req, tmpl):
    """
    /graph[/{revision}]
    -------------------

    Show information about the graphical topology of the repository.

    Information rendered by this handler can be used to create visual
    representations of repository topology.

    The ``revision`` URL parameter controls the starting changeset.

    The ``revcount`` query string argument can define the number of changesets
    to show information for.

    This handler will render the ``graph`` template.
    """

    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    rev = ctx.rev()

    bg_height = 39
    revcount = web.maxshortchanges
    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = len(web.repo)
    pos = rev

    uprev = min(max(0, count - 1), rev + revcount)
    downrev = max(0, rev - revcount)
    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    tree = []
    if pos != -1:
        allrevs = web.repo.changelog.revs(pos, 0)
        revs = []
        for i in allrevs:
            revs.append(i)
            if len(revs) >= revcount:
                break

        # We have to feed a baseset to dagwalker as it is expecting smartset
        # object. This does not have a big impact on hgweb performance itself
        # since hgweb graphing code is not itself lazy yet.
        dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
        # As we said one line above... not lazy.
        tree = list(graphmod.colored(dag, web.repo))

    def getcolumns(tree):
        cols = 0
        for (id, type, ctx, vtx, edges) in tree:
            if type != graphmod.CHANGESET:
                continue
            cols = max(cols, max([edge[0] for edge in edges] or [0]),
                       max([edge[1] for edge in edges] or [0]))
        return cols

    def graphdata(usetuples, encodestr):
        data = []

        row = 0
        for (id, type, ctx, vtx, edges) in tree:
            if type != graphmod.CHANGESET:
                continue
            node = str(ctx)
            age = encodestr(templatefilters.age(ctx.date()))
            desc = templatefilters.firstline(encodestr(ctx.description()))
            desc = cgi.escape(templatefilters.nonempty(desc))
            user = cgi.escape(templatefilters.person(encodestr(ctx.user())))
            branch = cgi.escape(encodestr(ctx.branch()))
            try:
                branchnode = web.repo.branchtip(branch)
            except error.RepoLookupError:
                branchnode = None
            branch = branch, branchnode == ctx.node()

            if usetuples:
                data.append((node, vtx, edges, desc, user, age, branch,
                             [cgi.escape(encodestr(x)) for x in ctx.tags()],
                             [cgi.escape(encodestr(x))
                              for x in ctx.bookmarks()]))
            else:
                edgedata = [{'col': edge[0], 'nextcol': edge[1],
                             'color': (edge[2] - 1) % 6 + 1,
                             'width': edge[3], 'bcolor': edge[4]}
                            for edge in edges]

                data.append(
                    {'node': node,
                     'col': vtx[0],
                     'color': (vtx[1] - 1) % 6 + 1,
                     'edges': edgedata,
                     'row': row,
                     'nextrow': row + 1,
                     'desc': desc,
                     'user': user,
                     'age': age,
                     'bookmarks': webutil.nodebookmarksdict(
                         web.repo, ctx.node()),
                     'branches': webutil.nodebranchdict(web.repo, ctx),
                     'inbranch': webutil.nodeinbranch(web.repo, ctx),
                     'tags': webutil.nodetagsdict(web.repo, ctx.node())})

            row += 1

        return data

    cols = getcolumns(tree)
    rows = len(tree)
    canvasheight = (rows + 1) * bg_height - 27

    return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
                uprev=uprev,
                lessvars=lessvars, morevars=morevars, downrev=downrev,
                cols=cols, rows=rows,
                canvaswidth=(cols + 1) * bg_height,
                truecanvasheight=rows * bg_height,
                canvasheight=canvasheight, bg_height=bg_height,
                # {jsdata} will be passed to |json, so it must be in utf-8
                jsdata=lambda **x: graphdata(True, encoding.fromlocal),
                nodes=lambda **x: graphdata(False, str),
                node=ctx.hex(), changenav=changenav)

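# Illustrative shape of graphdata()'s two output modes (all field values
# below are made up): usetuples=True feeds the {jsdata} blob rendered as
# JSON for the client-side graph drawing, while usetuples=False feeds
# the server-side {nodes} template loop:
#
#   usetuples=True  -> [('1d22e4f2...', (0, 1), [(0, 0, 1, 1, '')],
#                        'fix parser', 'alice', '3 days ago',
#                        ('default', True), [], []), ...]
#   usetuples=False -> [{'node': '1d22e4f2...', 'col': 0, 'color': 1,
#                        'edges': [...], 'row': 0, 'nextrow': 1, ...}, ...]
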
def _getdoc(e):
    doc = e[0].__doc__
    if doc:
        doc = _(doc).partition('\n')[0]
    else:
        doc = _('(no help text available)')
    return doc

@webcommand('help')
def help(web, req, tmpl):
    """
    /help[/{topic}]
    ---------------

    Render help documentation.

    This web command is roughly equivalent to :hg:`help`. If a ``topic``
    is defined, that help topic will be rendered. If not, an index of
    available help topics will be rendered.

    The ``help`` template will be rendered when requesting help for a topic.
    ``helptopics`` will be rendered for the index of help topics.
    """
    from .. import commands, help as helpmod # avoid cycle

    topicname = req.form.get('node', [None])[0]
    if not topicname:
        def topics(**map):
            for entries, summary, _doc in helpmod.helptable:
                yield {'topic': entries[0], 'summary': summary}

        early, other = [], []
        primary = lambda s: s.partition('|')[0]
        for c, e in commands.table.iteritems():
            doc = _getdoc(e)
            if 'DEPRECATED' in doc or c.startswith('debug'):
                continue
            cmd = primary(c)
            if cmd.startswith('^'):
                early.append((cmd[1:], doc))
            else:
                other.append((cmd, doc))

        early.sort()
        other.sort()

        def earlycommands(**map):
            for c, doc in early:
                yield {'topic': c, 'summary': doc}

        def othercommands(**map):
            for c, doc in other:
                yield {'topic': c, 'summary': doc}

        return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
                    othercommands=othercommands, title='Index')

    # Render an index of sub-topics.
    if topicname in helpmod.subtopics:
        topics = []
        for entries, summary, _doc in helpmod.subtopics[topicname]:
            topics.append({
                'topic': '%s.%s' % (topicname, entries[0]),
                'basename': entries[0],
                'summary': summary,
            })

        return tmpl('helptopics', topics=topics, title=topicname,
1280 return tmpl('helptopics', topics=topics, title=topicname,
1281 subindex=True)
1281 subindex=True)
1282
1282
1283 u = webutil.wsgiui()
1283 u = webutil.wsgiui()
1284 u.verbose = True
1284 u.verbose = True
1285
1285
1286 # Render a page from a sub-topic.
1286 # Render a page from a sub-topic.
1287 if '.' in topicname:
1287 if '.' in topicname:
1288 # TODO implement support for rendering sections, like
1288 # TODO implement support for rendering sections, like
1289 # `hg help` works.
1289 # `hg help` works.
1290 topic, subtopic = topicname.split('.', 1)
1290 topic, subtopic = topicname.split('.', 1)
1291 if topic not in helpmod.subtopics:
1291 if topic not in helpmod.subtopics:
1292 raise ErrorResponse(HTTP_NOT_FOUND)
1292 raise ErrorResponse(HTTP_NOT_FOUND)
1293 else:
1293 else:
1294 topic = topicname
1294 topic = topicname
1295 subtopic = None
1295 subtopic = None
1296
1296
1297 try:
1297 try:
1298 doc = helpmod.help_(u, topic, subtopic=subtopic)
1298 doc = helpmod.help_(u, topic, subtopic=subtopic)
1299 except error.UnknownCommand:
1299 except error.UnknownCommand:
1300 raise ErrorResponse(HTTP_NOT_FOUND)
1300 raise ErrorResponse(HTTP_NOT_FOUND)
1301 return tmpl('help', topic=topicname, doc=doc)
1301 return tmpl('help', topic=topicname, doc=doc)
1302
1302
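# Usage sketch for the web command above -- a hedged example, assuming a
# local `hg serve` instance listening on port 8000 (the URL shape follows
# the /help[/{topic}] form documented in the docstring):
#
#   import urllib2
#   urllib2.urlopen('http://localhost:8000/help').read()          # topic index
#   urllib2.urlopen('http://localhost:8000/help/revsets').read()  # one topic
#
# Unknown topics raise error.UnknownCommand above and thus map to HTTP 404.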
1303 # tell hggettext to extract docstrings from these functions:
1303 # tell hggettext to extract docstrings from these functions:
1304 i18nfunctions = commands.values()
1304 i18nfunctions = commands.values()
@@ -1,608 +1,608 @@
1 # hgweb/webutil.py - utility library for the web interface.
1 # hgweb/webutil.py - utility library for the web interface.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import copy
11 import copy
12 import difflib
12 import difflib
13 import os
13 import os
14 import re
14 import re
15
15
16 from ..i18n import _
16 from ..i18n import _
17 from ..node import hex, nullid, short
17 from ..node import hex, nullid, short
18
18
19 from .common import (
19 from .common import (
20 ErrorResponse,
20 ErrorResponse,
21 HTTP_NOT_FOUND,
21 HTTP_NOT_FOUND,
22 paritygen,
22 paritygen,
23 )
23 )
24
24
25 from .. import (
25 from .. import (
26 context,
26 context,
27 error,
27 error,
28 match,
28 match,
29 patch,
29 patch,
30 pathutil,
30 pathutil,
31 templatefilters,
31 templatefilters,
32 ui as uimod,
32 ui as uimod,
33 util,
33 util,
34 )
34 )
35
35
36 def up(p):
36 def up(p):
37 if p[0] != "/":
37 if p[0] != "/":
38 p = "/" + p
38 p = "/" + p
39 if p[-1] == "/":
39 if p[-1] == "/":
40 p = p[:-1]
40 p = p[:-1]
41 up = os.path.dirname(p)
41 up = os.path.dirname(p)
42 if up == "/":
42 if up == "/":
43 return "/"
43 return "/"
44 return up + "/"
44 return up + "/"
45
45
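# Behavior sketch for up(), traced from the code above:
#
#   up('/foo/bar')   # -> '/foo/'
#   up('/foo/bar/')  # -> '/foo/'  (trailing slash stripped first)
#   up('foo')        # -> '/'      (normalized to '/foo', whose parent is '/')
#   up('/foo/')      # -> '/'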
46 def _navseq(step, firststep=None):
46 def _navseq(step, firststep=None):
47 if firststep:
47 if firststep:
48 yield firststep
48 yield firststep
49 if firststep >= 20 and firststep <= 40:
49 if firststep >= 20 and firststep <= 40:
50 firststep = 50
50 firststep = 50
51 yield firststep
51 yield firststep
52 assert step > 0
52 assert step > 0
53 assert firststep > 0
53 assert firststep > 0
54 while step <= firststep:
54 while step <= firststep:
55 step *= 10
55 step *= 10
56 while True:
56 while True:
57 yield 1 * step
57 yield 1 * step
58 yield 3 * step
58 yield 3 * step
59 step *= 10
59 step *= 10
60
60
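# A worked trace of _navseq (illustration, not part of the original file):
# the generator yields the offsets used for "+10 +30 +100 ..." nav links.
#
#   import itertools
#   list(itertools.islice(_navseq(1, 25), 7))
#   # -> [25, 50, 100, 300, 1000, 3000, 10000]
#
# 25 falls in the 20..40 window, so 50 is emitted as a second first step;
# the loop then scales step to the first power of ten above 50 and
# alternates the 1x/3x pattern from there.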
61 class revnav(object):
61 class revnav(object):
62
62
63 def __init__(self, repo):
63 def __init__(self, repo):
64 """Navigation generation object
64 """Navigation generation object
65
65
66 :repo: repo object we generate nav for
66 :repo: repo object we generate nav for
67 """
67 """
68 # used for hex generation
68 # used for hex generation
69 self._revlog = repo.changelog
69 self._revlog = repo.changelog
70
70
71 def __nonzero__(self):
71 def __nonzero__(self):
72 """return True if any revision to navigate over"""
72 """return True if any revision to navigate over"""
73 return self._first() is not None
73 return self._first() is not None
74
74
75 def _first(self):
75 def _first(self):
76 """return the minimum non-filtered changeset or None"""
76 """return the minimum non-filtered changeset or None"""
77 try:
77 try:
78 return iter(self._revlog).next()
78 return next(iter(self._revlog))
79 except StopIteration:
79 except StopIteration:
80 return None
80 return None
81
81
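# The change above is the point of this patch: `it.next()` is a
# Python 2-only spelling (Python 3 renames the method to `__next__`),
# while the `next()` builtin has worked since Python 2.6 on both:
#
#   it = iter([1, 2])
#   next(it)      # -> 1, works on Python 2 and 3
#   it.next()     # -> 2 on Python 2, AttributeError on Python 3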
82 def hex(self, rev):
82 def hex(self, rev):
83 return hex(self._revlog.node(rev))
83 return hex(self._revlog.node(rev))
84
84
85 def gen(self, pos, pagelen, limit):
85 def gen(self, pos, pagelen, limit):
86 """computes label and revision id for navigation link
86 """computes label and revision id for navigation link
87
87
88 :pos: is the revision relative to which we generate navigation.
88 :pos: is the revision relative to which we generate navigation.
89 :pagelen: the size of each navigation page
89 :pagelen: the size of each navigation page
90 :limit: how far shall we link
90 :limit: how far shall we link
91
91
92 The return is:
92 The return is:
93 - a single element tuple
93 - a single element tuple
94 - containing a dictionary with a `before` and `after` key
94 - containing a dictionary with a `before` and `after` key
95 - values are generator functions taking arbitrary number of kwargs
95 - values are generator functions taking arbitrary number of kwargs
96 - yield items are dictionaries with `label` and `node` keys
96 - yield items are dictionaries with `label` and `node` keys
97 """
97 """
98 if not self:
98 if not self:
99 # empty repo
99 # empty repo
100 return ({'before': (), 'after': ()},)
100 return ({'before': (), 'after': ()},)
101
101
102 targets = []
102 targets = []
103 for f in _navseq(1, pagelen):
103 for f in _navseq(1, pagelen):
104 if f > limit:
104 if f > limit:
105 break
105 break
106 targets.append(pos + f)
106 targets.append(pos + f)
107 targets.append(pos - f)
107 targets.append(pos - f)
108 targets.sort()
108 targets.sort()
109
109
110 first = self._first()
110 first = self._first()
111 navbefore = [("(%i)" % first, self.hex(first))]
111 navbefore = [("(%i)" % first, self.hex(first))]
112 navafter = []
112 navafter = []
113 for rev in targets:
113 for rev in targets:
114 if rev not in self._revlog:
114 if rev not in self._revlog:
115 continue
115 continue
116 if pos < rev < limit:
116 if pos < rev < limit:
117 navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
117 navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
118 if 0 < rev < pos:
118 if 0 < rev < pos:
119 navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))
119 navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))
120
120
121
121
122 navafter.append(("tip", "tip"))
122 navafter.append(("tip", "tip"))
123
123
124 data = lambda i: {"label": i[0], "node": i[1]}
124 data = lambda i: {"label": i[0], "node": i[1]}
125 return ({'before': lambda **map: (data(i) for i in navbefore),
125 return ({'before': lambda **map: (data(i) for i in navbefore),
126 'after': lambda **map: (data(i) for i in navafter)},)
126 'after': lambda **map: (data(i) for i in navafter)},)
127
127
128 class filerevnav(revnav):
128 class filerevnav(revnav):
129
129
130 def __init__(self, repo, path):
130 def __init__(self, repo, path):
131 """Navigation generation object
131 """Navigation generation object
132
132
133 :repo: repo object we generate nav for
133 :repo: repo object we generate nav for
134 :path: path of the file we generate nav for
134 :path: path of the file we generate nav for
135 """
135 """
136 # used for iteration
136 # used for iteration
137 self._changelog = repo.unfiltered().changelog
137 self._changelog = repo.unfiltered().changelog
138 # used for hex generation
138 # used for hex generation
139 self._revlog = repo.file(path)
139 self._revlog = repo.file(path)
140
140
141 def hex(self, rev):
141 def hex(self, rev):
142 return hex(self._changelog.node(self._revlog.linkrev(rev)))
142 return hex(self._changelog.node(self._revlog.linkrev(rev)))
143
143
144 class _siblings(object):
144 class _siblings(object):
145 def __init__(self, siblings=[], hiderev=None):
145 def __init__(self, siblings=[], hiderev=None):
146 self.siblings = [s for s in siblings if s.node() != nullid]
146 self.siblings = [s for s in siblings if s.node() != nullid]
147 if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
147 if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
148 self.siblings = []
148 self.siblings = []
149
149
150 def __iter__(self):
150 def __iter__(self):
151 for s in self.siblings:
151 for s in self.siblings:
152 d = {
152 d = {
153 'node': s.hex(),
153 'node': s.hex(),
154 'rev': s.rev(),
154 'rev': s.rev(),
155 'user': s.user(),
155 'user': s.user(),
156 'date': s.date(),
156 'date': s.date(),
157 'description': s.description(),
157 'description': s.description(),
158 'branch': s.branch(),
158 'branch': s.branch(),
159 }
159 }
160 if util.safehasattr(s, 'path'):
160 if util.safehasattr(s, 'path'):
161 d['file'] = s.path()
161 d['file'] = s.path()
162 yield d
162 yield d
163
163
164 def __len__(self):
164 def __len__(self):
165 return len(self.siblings)
165 return len(self.siblings)
166
166
167 def parents(ctx, hide=None):
167 def parents(ctx, hide=None):
168 if isinstance(ctx, context.basefilectx):
168 if isinstance(ctx, context.basefilectx):
169 introrev = ctx.introrev()
169 introrev = ctx.introrev()
170 if ctx.changectx().rev() != introrev:
170 if ctx.changectx().rev() != introrev:
171 return _siblings([ctx.repo()[introrev]], hide)
171 return _siblings([ctx.repo()[introrev]], hide)
172 return _siblings(ctx.parents(), hide)
172 return _siblings(ctx.parents(), hide)
173
173
174 def children(ctx, hide=None):
174 def children(ctx, hide=None):
175 return _siblings(ctx.children(), hide)
175 return _siblings(ctx.children(), hide)
176
176
177 def renamelink(fctx):
177 def renamelink(fctx):
178 r = fctx.renamed()
178 r = fctx.renamed()
179 if r:
179 if r:
180 return [{'file': r[0], 'node': hex(r[1])}]
180 return [{'file': r[0], 'node': hex(r[1])}]
181 return []
181 return []
182
182
183 def nodetagsdict(repo, node):
183 def nodetagsdict(repo, node):
184 return [{"name": i} for i in repo.nodetags(node)]
184 return [{"name": i} for i in repo.nodetags(node)]
185
185
186 def nodebookmarksdict(repo, node):
186 def nodebookmarksdict(repo, node):
187 return [{"name": i} for i in repo.nodebookmarks(node)]
187 return [{"name": i} for i in repo.nodebookmarks(node)]
188
188
189 def nodebranchdict(repo, ctx):
189 def nodebranchdict(repo, ctx):
190 branches = []
190 branches = []
191 branch = ctx.branch()
191 branch = ctx.branch()
192 # If this is an empty repo, ctx.node() == nullid,
192 # If this is an empty repo, ctx.node() == nullid,
193 # ctx.branch() == 'default'.
193 # ctx.branch() == 'default'.
194 try:
194 try:
195 branchnode = repo.branchtip(branch)
195 branchnode = repo.branchtip(branch)
196 except error.RepoLookupError:
196 except error.RepoLookupError:
197 branchnode = None
197 branchnode = None
198 if branchnode == ctx.node():
198 if branchnode == ctx.node():
199 branches.append({"name": branch})
199 branches.append({"name": branch})
200 return branches
200 return branches
201
201
202 def nodeinbranch(repo, ctx):
202 def nodeinbranch(repo, ctx):
203 branches = []
203 branches = []
204 branch = ctx.branch()
204 branch = ctx.branch()
205 try:
205 try:
206 branchnode = repo.branchtip(branch)
206 branchnode = repo.branchtip(branch)
207 except error.RepoLookupError:
207 except error.RepoLookupError:
208 branchnode = None
208 branchnode = None
209 if branch != 'default' and branchnode != ctx.node():
209 if branch != 'default' and branchnode != ctx.node():
210 branches.append({"name": branch})
210 branches.append({"name": branch})
211 return branches
211 return branches
212
212
213 def nodebranchnodefault(ctx):
213 def nodebranchnodefault(ctx):
214 branches = []
214 branches = []
215 branch = ctx.branch()
215 branch = ctx.branch()
216 if branch != 'default':
216 if branch != 'default':
217 branches.append({"name": branch})
217 branches.append({"name": branch})
218 return branches
218 return branches
219
219
220 def showtag(repo, tmpl, t1, node=nullid, **args):
220 def showtag(repo, tmpl, t1, node=nullid, **args):
221 for t in repo.nodetags(node):
221 for t in repo.nodetags(node):
222 yield tmpl(t1, tag=t, **args)
222 yield tmpl(t1, tag=t, **args)
223
223
224 def showbookmark(repo, tmpl, t1, node=nullid, **args):
224 def showbookmark(repo, tmpl, t1, node=nullid, **args):
225 for t in repo.nodebookmarks(node):
225 for t in repo.nodebookmarks(node):
226 yield tmpl(t1, bookmark=t, **args)
226 yield tmpl(t1, bookmark=t, **args)
227
227
228 def branchentries(repo, stripecount, limit=0):
228 def branchentries(repo, stripecount, limit=0):
229 tips = []
229 tips = []
230 heads = repo.heads()
230 heads = repo.heads()
231 parity = paritygen(stripecount)
231 parity = paritygen(stripecount)
232 sortkey = lambda item: (not item[1], item[0].rev())
232 sortkey = lambda item: (not item[1], item[0].rev())
233
233
234 def entries(**map):
234 def entries(**map):
235 count = 0
235 count = 0
236 if not tips:
236 if not tips:
237 for tag, hs, tip, closed in repo.branchmap().iterbranches():
237 for tag, hs, tip, closed in repo.branchmap().iterbranches():
238 tips.append((repo[tip], closed))
238 tips.append((repo[tip], closed))
239 for ctx, closed in sorted(tips, key=sortkey, reverse=True):
239 for ctx, closed in sorted(tips, key=sortkey, reverse=True):
240 if limit > 0 and count >= limit:
240 if limit > 0 and count >= limit:
241 return
241 return
242 count += 1
242 count += 1
243 if closed:
243 if closed:
244 status = 'closed'
244 status = 'closed'
245 elif ctx.node() not in heads:
245 elif ctx.node() not in heads:
246 status = 'inactive'
246 status = 'inactive'
247 else:
247 else:
248 status = 'open'
248 status = 'open'
249 yield {
249 yield {
250 'parity': parity.next(),
250 'parity': next(parity),
251 'branch': ctx.branch(),
251 'branch': ctx.branch(),
252 'status': status,
252 'status': status,
253 'node': ctx.hex(),
253 'node': ctx.hex(),
254 'date': ctx.date()
254 'date': ctx.date()
255 }
255 }
256
256
257 return entries
257 return entries
258
258
259 def cleanpath(repo, path):
259 def cleanpath(repo, path):
260 path = path.lstrip('/')
260 path = path.lstrip('/')
261 return pathutil.canonpath(repo.root, '', path)
261 return pathutil.canonpath(repo.root, '', path)
262
262
263 def changeidctx(repo, changeid):
263 def changeidctx(repo, changeid):
264 try:
264 try:
265 ctx = repo[changeid]
265 ctx = repo[changeid]
266 except error.RepoError:
266 except error.RepoError:
267 man = repo.manifest
267 man = repo.manifest
268 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
268 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
269
269
270 return ctx
270 return ctx
271
271
272 def changectx(repo, req):
272 def changectx(repo, req):
273 changeid = "tip"
273 changeid = "tip"
274 if 'node' in req.form:
274 if 'node' in req.form:
275 changeid = req.form['node'][0]
275 changeid = req.form['node'][0]
276 ipos = changeid.find(':')
276 ipos = changeid.find(':')
277 if ipos != -1:
277 if ipos != -1:
278 changeid = changeid[(ipos + 1):]
278 changeid = changeid[(ipos + 1):]
279 elif 'manifest' in req.form:
279 elif 'manifest' in req.form:
280 changeid = req.form['manifest'][0]
280 changeid = req.form['manifest'][0]
281
281
282 return changeidctx(repo, changeid)
282 return changeidctx(repo, changeid)
283
283
284 def basechangectx(repo, req):
284 def basechangectx(repo, req):
285 if 'node' in req.form:
285 if 'node' in req.form:
286 changeid = req.form['node'][0]
286 changeid = req.form['node'][0]
287 ipos = changeid.find(':')
287 ipos = changeid.find(':')
288 if ipos != -1:
288 if ipos != -1:
289 changeid = changeid[:ipos]
289 changeid = changeid[:ipos]
290 return changeidctx(repo, changeid)
290 return changeidctx(repo, changeid)
291
291
292 return None
292 return None
293
293
294 def filectx(repo, req):
294 def filectx(repo, req):
295 if 'file' not in req.form:
295 if 'file' not in req.form:
296 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
296 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
297 path = cleanpath(repo, req.form['file'][0])
297 path = cleanpath(repo, req.form['file'][0])
298 if 'node' in req.form:
298 if 'node' in req.form:
299 changeid = req.form['node'][0]
299 changeid = req.form['node'][0]
300 elif 'filenode' in req.form:
300 elif 'filenode' in req.form:
301 changeid = req.form['filenode'][0]
301 changeid = req.form['filenode'][0]
302 else:
302 else:
303 raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
303 raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
304 try:
304 try:
305 fctx = repo[changeid][path]
305 fctx = repo[changeid][path]
306 except error.RepoError:
306 except error.RepoError:
307 fctx = repo.filectx(path, fileid=changeid)
307 fctx = repo.filectx(path, fileid=changeid)
308
308
309 return fctx
309 return fctx
310
310
311 def commonentry(repo, ctx):
311 def commonentry(repo, ctx):
312 node = ctx.node()
312 node = ctx.node()
313 return {
313 return {
314 'rev': ctx.rev(),
314 'rev': ctx.rev(),
315 'node': hex(node),
315 'node': hex(node),
316 'author': ctx.user(),
316 'author': ctx.user(),
317 'desc': ctx.description(),
317 'desc': ctx.description(),
318 'date': ctx.date(),
318 'date': ctx.date(),
319 'extra': ctx.extra(),
319 'extra': ctx.extra(),
320 'phase': ctx.phasestr(),
320 'phase': ctx.phasestr(),
321 'branch': nodebranchnodefault(ctx),
321 'branch': nodebranchnodefault(ctx),
322 'inbranch': nodeinbranch(repo, ctx),
322 'inbranch': nodeinbranch(repo, ctx),
323 'branches': nodebranchdict(repo, ctx),
323 'branches': nodebranchdict(repo, ctx),
324 'tags': nodetagsdict(repo, node),
324 'tags': nodetagsdict(repo, node),
325 'bookmarks': nodebookmarksdict(repo, node),
325 'bookmarks': nodebookmarksdict(repo, node),
326 'parent': lambda **x: parents(ctx),
326 'parent': lambda **x: parents(ctx),
327 'child': lambda **x: children(ctx),
327 'child': lambda **x: children(ctx),
328 }
328 }
329
329
330 def changelistentry(web, ctx, tmpl):
330 def changelistentry(web, ctx, tmpl):
331 '''Obtain a dictionary to be used for entries in a changelist.
331 '''Obtain a dictionary to be used for entries in a changelist.
332
332
333 This function is called when producing items for the "entries" list passed
333 This function is called when producing items for the "entries" list passed
334 to the "shortlog" and "changelog" templates.
334 to the "shortlog" and "changelog" templates.
335 '''
335 '''
336 repo = web.repo
336 repo = web.repo
337 rev = ctx.rev()
337 rev = ctx.rev()
338 n = ctx.node()
338 n = ctx.node()
339 showtags = showtag(repo, tmpl, 'changelogtag', n)
339 showtags = showtag(repo, tmpl, 'changelogtag', n)
340 files = listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
340 files = listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
341
341
342 entry = commonentry(repo, ctx)
342 entry = commonentry(repo, ctx)
343 entry.update(
343 entry.update(
344 allparents=lambda **x: parents(ctx),
344 allparents=lambda **x: parents(ctx),
345 parent=lambda **x: parents(ctx, rev - 1),
345 parent=lambda **x: parents(ctx, rev - 1),
346 child=lambda **x: children(ctx, rev + 1),
346 child=lambda **x: children(ctx, rev + 1),
347 changelogtag=showtags,
347 changelogtag=showtags,
348 files=files,
348 files=files,
349 )
349 )
350 return entry
350 return entry
351
351
352 def symrevorshortnode(req, ctx):
352 def symrevorshortnode(req, ctx):
353 if 'node' in req.form:
353 if 'node' in req.form:
354 return templatefilters.revescape(req.form['node'][0])
354 return templatefilters.revescape(req.form['node'][0])
355 else:
355 else:
356 return short(ctx.node())
356 return short(ctx.node())
357
357
358 def changesetentry(web, req, tmpl, ctx):
358 def changesetentry(web, req, tmpl, ctx):
359 '''Obtain a dictionary to be used to render the "changeset" template.'''
359 '''Obtain a dictionary to be used to render the "changeset" template.'''
360
360
361 showtags = showtag(web.repo, tmpl, 'changesettag', ctx.node())
361 showtags = showtag(web.repo, tmpl, 'changesettag', ctx.node())
362 showbookmarks = showbookmark(web.repo, tmpl, 'changesetbookmark',
362 showbookmarks = showbookmark(web.repo, tmpl, 'changesetbookmark',
363 ctx.node())
363 ctx.node())
364 showbranch = nodebranchnodefault(ctx)
364 showbranch = nodebranchnodefault(ctx)
365
365
366 files = []
366 files = []
367 parity = paritygen(web.stripecount)
367 parity = paritygen(web.stripecount)
368 for blockno, f in enumerate(ctx.files()):
368 for blockno, f in enumerate(ctx.files()):
369 template = f in ctx and 'filenodelink' or 'filenolink'
369 template = f in ctx and 'filenodelink' or 'filenolink'
370 files.append(tmpl(template,
370 files.append(tmpl(template,
371 node=ctx.hex(), file=f, blockno=blockno + 1,
371 node=ctx.hex(), file=f, blockno=blockno + 1,
372 parity=parity.next()))
372 parity=next(parity)))
373
373
374 basectx = basechangectx(web.repo, req)
374 basectx = basechangectx(web.repo, req)
375 if basectx is None:
375 if basectx is None:
376 basectx = ctx.p1()
376 basectx = ctx.p1()
377
377
378 style = web.config('web', 'style', 'paper')
378 style = web.config('web', 'style', 'paper')
379 if 'style' in req.form:
379 if 'style' in req.form:
380 style = req.form['style'][0]
380 style = req.form['style'][0]
381
381
382 parity = paritygen(web.stripecount)
382 parity = paritygen(web.stripecount)
383 diff = diffs(web.repo, tmpl, ctx, basectx, None, parity, style)
383 diff = diffs(web.repo, tmpl, ctx, basectx, None, parity, style)
384
384
385 parity = paritygen(web.stripecount)
385 parity = paritygen(web.stripecount)
386 diffstatsgen = diffstatgen(ctx, basectx)
386 diffstatsgen = diffstatgen(ctx, basectx)
387 diffstats = diffstat(tmpl, ctx, diffstatsgen, parity)
387 diffstats = diffstat(tmpl, ctx, diffstatsgen, parity)
388
388
389 return dict(
389 return dict(
390 diff=diff,
390 diff=diff,
391 symrev=symrevorshortnode(req, ctx),
391 symrev=symrevorshortnode(req, ctx),
392 basenode=basectx.hex(),
392 basenode=basectx.hex(),
393 changesettag=showtags,
393 changesettag=showtags,
394 changesetbookmark=showbookmarks,
394 changesetbookmark=showbookmarks,
395 changesetbranch=showbranch,
395 changesetbranch=showbranch,
396 files=files,
396 files=files,
397 diffsummary=lambda **x: diffsummary(diffstatsgen),
397 diffsummary=lambda **x: diffsummary(diffstatsgen),
398 diffstat=diffstats,
398 diffstat=diffstats,
399 archives=web.archivelist(ctx.hex()),
399 archives=web.archivelist(ctx.hex()),
400 **commonentry(web.repo, ctx))
400 **commonentry(web.repo, ctx))
401
401
402 def listfilediffs(tmpl, files, node, max):
402 def listfilediffs(tmpl, files, node, max):
403 for f in files[:max]:
403 for f in files[:max]:
404 yield tmpl('filedifflink', node=hex(node), file=f)
404 yield tmpl('filedifflink', node=hex(node), file=f)
405 if len(files) > max:
405 if len(files) > max:
406 yield tmpl('fileellipses')
406 yield tmpl('fileellipses')
407
407
408 def diffs(repo, tmpl, ctx, basectx, files, parity, style):
408 def diffs(repo, tmpl, ctx, basectx, files, parity, style):
409
409
410 def countgen():
410 def countgen():
411 start = 1
411 start = 1
412 while True:
412 while True:
413 yield start
413 yield start
414 start += 1
414 start += 1
415
415
416 blockcount = countgen()
416 blockcount = countgen()
417 def prettyprintlines(diff, blockno):
417 def prettyprintlines(diff, blockno):
418 for lineno, l in enumerate(diff.splitlines(True)):
418 for lineno, l in enumerate(diff.splitlines(True)):
419 difflineno = "%d.%d" % (blockno, lineno + 1)
419 difflineno = "%d.%d" % (blockno, lineno + 1)
420 if l.startswith('+'):
420 if l.startswith('+'):
421 ltype = "difflineplus"
421 ltype = "difflineplus"
422 elif l.startswith('-'):
422 elif l.startswith('-'):
423 ltype = "difflineminus"
423 ltype = "difflineminus"
424 elif l.startswith('@'):
424 elif l.startswith('@'):
425 ltype = "difflineat"
425 ltype = "difflineat"
426 else:
426 else:
427 ltype = "diffline"
427 ltype = "diffline"
428 yield tmpl(ltype,
428 yield tmpl(ltype,
429 line=l,
429 line=l,
430 lineno=lineno + 1,
430 lineno=lineno + 1,
431 lineid="l%s" % difflineno,
431 lineid="l%s" % difflineno,
432 linenumber="% 8s" % difflineno)
432 linenumber="% 8s" % difflineno)
433
433
434 if files:
434 if files:
435 m = match.exact(repo.root, repo.getcwd(), files)
435 m = match.exact(repo.root, repo.getcwd(), files)
436 else:
436 else:
437 m = match.always(repo.root, repo.getcwd())
437 m = match.always(repo.root, repo.getcwd())
438
438
439 diffopts = patch.diffopts(repo.ui, untrusted=True)
439 diffopts = patch.diffopts(repo.ui, untrusted=True)
440 if basectx is None:
440 if basectx is None:
441 parents = ctx.parents()
441 parents = ctx.parents()
442 if parents:
442 if parents:
443 node1 = parents[0].node()
443 node1 = parents[0].node()
444 else:
444 else:
445 node1 = nullid
445 node1 = nullid
446 else:
446 else:
447 node1 = basectx.node()
447 node1 = basectx.node()
448 node2 = ctx.node()
448 node2 = ctx.node()
449
449
450 block = []
450 block = []
451 for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
451 for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
452 if chunk.startswith('diff') and block:
452 if chunk.startswith('diff') and block:
453 blockno = blockcount.next()
453 blockno = next(blockcount)
454 yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
454 yield tmpl('diffblock', parity=next(parity), blockno=blockno,
455 lines=prettyprintlines(''.join(block), blockno))
455 lines=prettyprintlines(''.join(block), blockno))
456 block = []
456 block = []
457 if chunk.startswith('diff') and style != 'raw':
457 if chunk.startswith('diff') and style != 'raw':
458 chunk = ''.join(chunk.splitlines(True)[1:])
458 chunk = ''.join(chunk.splitlines(True)[1:])
459 block.append(chunk)
459 block.append(chunk)
460 blockno = blockcount.next()
460 blockno = next(blockcount)
461 yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
461 yield tmpl('diffblock', parity=next(parity), blockno=blockno,
462 lines=prettyprintlines(''.join(block), blockno))
462 lines=prettyprintlines(''.join(block), blockno))
463
463
464 def compare(tmpl, context, leftlines, rightlines):
464 def compare(tmpl, context, leftlines, rightlines):
465 '''Generator function that provides side-by-side comparison data.'''
465 '''Generator function that provides side-by-side comparison data.'''
466
466
467 def compline(type, leftlineno, leftline, rightlineno, rightline):
467 def compline(type, leftlineno, leftline, rightlineno, rightline):
468 lineid = leftlineno and ("l%s" % leftlineno) or ''
468 lineid = leftlineno and ("l%s" % leftlineno) or ''
469 lineid += rightlineno and ("r%s" % rightlineno) or ''
469 lineid += rightlineno and ("r%s" % rightlineno) or ''
470 return tmpl('comparisonline',
470 return tmpl('comparisonline',
471 type=type,
471 type=type,
472 lineid=lineid,
472 lineid=lineid,
473 leftlineno=leftlineno,
473 leftlineno=leftlineno,
474 leftlinenumber="% 6s" % (leftlineno or ''),
474 leftlinenumber="% 6s" % (leftlineno or ''),
475 leftline=leftline or '',
475 leftline=leftline or '',
476 rightlineno=rightlineno,
476 rightlineno=rightlineno,
477 rightlinenumber="% 6s" % (rightlineno or ''),
477 rightlinenumber="% 6s" % (rightlineno or ''),
478 rightline=rightline or '')
478 rightline=rightline or '')
479
479
480 def getblock(opcodes):
480 def getblock(opcodes):
481 for type, llo, lhi, rlo, rhi in opcodes:
481 for type, llo, lhi, rlo, rhi in opcodes:
482 len1 = lhi - llo
482 len1 = lhi - llo
483 len2 = rhi - rlo
483 len2 = rhi - rlo
484 count = min(len1, len2)
484 count = min(len1, len2)
485 for i in xrange(count):
485 for i in xrange(count):
486 yield compline(type=type,
486 yield compline(type=type,
487 leftlineno=llo + i + 1,
487 leftlineno=llo + i + 1,
488 leftline=leftlines[llo + i],
488 leftline=leftlines[llo + i],
489 rightlineno=rlo + i + 1,
489 rightlineno=rlo + i + 1,
490 rightline=rightlines[rlo + i])
490 rightline=rightlines[rlo + i])
491 if len1 > len2:
491 if len1 > len2:
492 for i in xrange(llo + count, lhi):
492 for i in xrange(llo + count, lhi):
493 yield compline(type=type,
493 yield compline(type=type,
494 leftlineno=i + 1,
494 leftlineno=i + 1,
495 leftline=leftlines[i],
495 leftline=leftlines[i],
496 rightlineno=None,
496 rightlineno=None,
497 rightline=None)
497 rightline=None)
498 elif len2 > len1:
498 elif len2 > len1:
499 for i in xrange(rlo + count, rhi):
499 for i in xrange(rlo + count, rhi):
500 yield compline(type=type,
500 yield compline(type=type,
501 leftlineno=None,
501 leftlineno=None,
502 leftline=None,
502 leftline=None,
503 rightlineno=i + 1,
503 rightlineno=i + 1,
504 rightline=rightlines[i])
504 rightline=rightlines[i])
505
505
506 s = difflib.SequenceMatcher(None, leftlines, rightlines)
506 s = difflib.SequenceMatcher(None, leftlines, rightlines)
507 if context < 0:
507 if context < 0:
508 yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
508 yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
509 else:
509 else:
510 for oc in s.get_grouped_opcodes(n=context):
510 for oc in s.get_grouped_opcodes(n=context):
511 yield tmpl('comparisonblock', lines=getblock(oc))
511 yield tmpl('comparisonblock', lines=getblock(oc))
512
512
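# What getblock() consumes -- difflib opcodes are 5-tuples of
# (tag, llo, lhi, rlo, rhi); a small standalone illustration:
#
#   import difflib
#   s = difflib.SequenceMatcher(None, ['a\n', 'b\n'], ['a\n', 'c\n'])
#   s.get_opcodes()
#   # -> [('equal', 0, 1, 0, 1), ('replace', 1, 2, 1, 2)]
#
# get_grouped_opcodes(n=context) yields the same tuples, but only around
# changed regions, which is how the context-limited branch above works.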
513 def diffstatgen(ctx, basectx):
513 def diffstatgen(ctx, basectx):
514 '''Generator function that provides the diffstat data.'''
514 '''Generator function that provides the diffstat data.'''
515
515
516 stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
516 stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
517 maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
517 maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
518 while True:
518 while True:
519 yield stats, maxname, maxtotal, addtotal, removetotal, binary
519 yield stats, maxname, maxtotal, addtotal, removetotal, binary
520
520
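# diffstatgen computes the (possibly expensive) diffstat once, then yields
# the same tuple forever; diffsummary() and diffstat() below can both call
# next() on one shared generator without recomputing. The pattern in
# isolation (an illustrative sketch, not from the original file):
#
#   def memoized(compute):
#       value = compute()    # done exactly once
#       while True:
#           yield value
#
#   g = memoized(lambda: 2 + 2)
#   next(g), next(g)   # -> (4, 4), computed a single time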
521 def diffsummary(statgen):
521 def diffsummary(statgen):
522 '''Return a short summary of the diff.'''
522 '''Return a short summary of the diff.'''
523
523
524 stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
524 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
525 return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
525 return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
526 len(stats), addtotal, removetotal)
526 len(stats), addtotal, removetotal)
527
527
528 def diffstat(tmpl, ctx, statgen, parity):
528 def diffstat(tmpl, ctx, statgen, parity):
529 '''Return a diffstat template for each file in the diff.'''
529 '''Return a diffstat template for each file in the diff.'''
530
530
531 stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
531 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
532 files = ctx.files()
532 files = ctx.files()
533
533
534 def pct(i):
534 def pct(i):
535 if maxtotal == 0:
535 if maxtotal == 0:
536 return 0
536 return 0
537 return (float(i) / maxtotal) * 100
537 return (float(i) / maxtotal) * 100
538
538
539 fileno = 0
539 fileno = 0
540 for filename, adds, removes, isbinary in stats:
540 for filename, adds, removes, isbinary in stats:
541 template = filename in files and 'diffstatlink' or 'diffstatnolink'
541 template = filename in files and 'diffstatlink' or 'diffstatnolink'
542 total = adds + removes
542 total = adds + removes
543 fileno += 1
543 fileno += 1
544 yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
544 yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
545 total=total, addpct=pct(adds), removepct=pct(removes),
545 total=total, addpct=pct(adds), removepct=pct(removes),
546 parity=parity.next())
546 parity=next(parity))
547
547
548 class sessionvars(object):
548 class sessionvars(object):
549 def __init__(self, vars, start='?'):
549 def __init__(self, vars, start='?'):
550 self.start = start
550 self.start = start
551 self.vars = vars
551 self.vars = vars
552 def __getitem__(self, key):
552 def __getitem__(self, key):
553 return self.vars[key]
553 return self.vars[key]
554 def __setitem__(self, key, value):
554 def __setitem__(self, key, value):
555 self.vars[key] = value
555 self.vars[key] = value
556 def __copy__(self):
556 def __copy__(self):
557 return sessionvars(copy.copy(self.vars), self.start)
557 return sessionvars(copy.copy(self.vars), self.start)
558 def __iter__(self):
558 def __iter__(self):
559 separator = self.start
559 separator = self.start
560 for key, value in sorted(self.vars.iteritems()):
560 for key, value in sorted(self.vars.iteritems()):
561 yield {'name': key, 'value': str(value), 'separator': separator}
561 yield {'name': key, 'value': str(value), 'separator': separator}
562 separator = '&'
562 separator = '&'
563
563
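# Iteration sketch for sessionvars -- it renders URL query fragments with
# the right separators:
#
#   sv = sessionvars({'style': 'paper', 'rev': 'tip'})
#   [(d['separator'], d['name'], d['value']) for d in sv]
#   # -> [('?', 'rev', 'tip'), ('&', 'style', 'paper')]
#
# Keys are emitted in sorted order; only the first entry gets self.start.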
564 class wsgiui(uimod.ui):
564 class wsgiui(uimod.ui):
565 # default termwidth breaks under mod_wsgi
565 # default termwidth breaks under mod_wsgi
566 def termwidth(self):
566 def termwidth(self):
567 return 80
567 return 80
568
568
569 def getwebsubs(repo):
569 def getwebsubs(repo):
570 websubtable = []
570 websubtable = []
571 websubdefs = repo.ui.configitems('websub')
571 websubdefs = repo.ui.configitems('websub')
572 # we must maintain interhg backwards compatibility
572 # we must maintain interhg backwards compatibility
573 websubdefs += repo.ui.configitems('interhg')
573 websubdefs += repo.ui.configitems('interhg')
574 for key, pattern in websubdefs:
574 for key, pattern in websubdefs:
575 # grab the delimiter from the character after the "s"
575 # grab the delimiter from the character after the "s"
576 unesc = pattern[1]
576 unesc = pattern[1]
577 delim = re.escape(unesc)
577 delim = re.escape(unesc)
578
578
579 # identify portions of the pattern, taking care to avoid escaped
579 # identify portions of the pattern, taking care to avoid escaped
580 # delimiters. the replace format and flags are optional, but
580 # delimiters. the replace format and flags are optional, but
581 # delimiters are required.
581 # delimiters are required.
582 match = re.match(
582 match = re.match(
583 r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
583 r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
584 % (delim, delim, delim), pattern)
584 % (delim, delim, delim), pattern)
585 if not match:
585 if not match:
586 repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
586 repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
587 % (key, pattern))
587 % (key, pattern))
588 continue
588 continue
589
589
590 # we need to unescape the delimiter for regexp and format
590 # we need to unescape the delimiter for regexp and format
591 delim_re = re.compile(r'(?<!\\)\\%s' % delim)
591 delim_re = re.compile(r'(?<!\\)\\%s' % delim)
592 regexp = delim_re.sub(unesc, match.group(1))
592 regexp = delim_re.sub(unesc, match.group(1))
593 format = delim_re.sub(unesc, match.group(2))
593 format = delim_re.sub(unesc, match.group(2))
594
594
595 # the pattern allows for 6 regexp flags, so set them if necessary
595 # the pattern allows for 6 regexp flags, so set them if necessary
596 flagin = match.group(3)
596 flagin = match.group(3)
597 flags = 0
597 flags = 0
598 if flagin:
598 if flagin:
599 for flag in flagin.upper():
599 for flag in flagin.upper():
600 flags |= re.__dict__[flag]
600 flags |= re.__dict__[flag]
601
601
602 try:
602 try:
603 regexp = re.compile(regexp, flags)
603 regexp = re.compile(regexp, flags)
604 websubtable.append((regexp, format))
604 websubtable.append((regexp, format))
605 except re.error:
605 except re.error:
606 repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
606 repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
607 % (key, regexp))
607 % (key, regexp))
608 return websubtable
608 return websubtable
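# Config sketch for the parser above (hypothetical tracker URL), using the
# s<delim>regexp<delim>format<delim>flags shape the regular expression
# expects:
#
#   [websub]
#   issues = s|issue(\d+)|<a href="https://bts.example.org/\1">issue\1</a>|i
#
# Here '|' is the delimiter grabbed from pattern[1], the trailing 'i'
# becomes re.I via re.__dict__, and the format string keeps its
# backreferences for re.sub-style replacement.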
@@ -1,1651 +1,1651 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 )
22 )
23 from . import (
23 from . import (
24 copies,
24 copies,
25 destutil,
25 destutil,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 obsolete,
28 obsolete,
29 scmutil,
29 scmutil,
30 subrepo,
30 subrepo,
31 util,
31 util,
32 worker,
32 worker,
33 )
33 )
34
34
35 _pack = struct.pack
35 _pack = struct.pack
36 _unpack = struct.unpack
36 _unpack = struct.unpack
37
37
38 def _droponode(data):
38 def _droponode(data):
39 # used for compatibility for v1
39 # used for compatibility for v1
40 bits = data.split('\0')
40 bits = data.split('\0')
41 bits = bits[:-2] + bits[-1:]
41 bits = bits[:-2] + bits[-1:]
42 return '\0'.join(bits)
42 return '\0'.join(bits)
43
43
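# _droponode strips the second-to-last NUL-separated field (the "other
# node" that only the v2 format carries) so a record can round-trip to the
# v1 layout; field names below are illustrative:
#
#   _droponode('file\x00hash\x00flags\x00onode\x00fnode')
#   # -> 'file\x00hash\x00flags\x00fnode'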
44 class mergestate(object):
44 class mergestate(object):
45 '''track 3-way merge state of individual files
45 '''track 3-way merge state of individual files
46
46
47 The merge state is stored on disk when needed. Two files are used: one with
47 The merge state is stored on disk when needed. Two files are used: one with
48 an old format (version 1), and one with a new format (version 2). Version 2
48 an old format (version 1), and one with a new format (version 2). Version 2
49 stores a superset of the data in version 1, including new kinds of records
49 stores a superset of the data in version 1, including new kinds of records
50 in the future. For more about the new format, see the documentation for
50 in the future. For more about the new format, see the documentation for
51 `_readrecordsv2`.
51 `_readrecordsv2`.
52
52
53 Each record can contain arbitrary content, and has an associated type. This
53 Each record can contain arbitrary content, and has an associated type. This
54 `type` should be a letter. If `type` is uppercase, the record is mandatory:
54 `type` should be a letter. If `type` is uppercase, the record is mandatory:
55 versions of Mercurial that don't support it should abort. If `type` is
55 versions of Mercurial that don't support it should abort. If `type` is
56 lowercase, the record can be safely ignored.
56 lowercase, the record can be safely ignored.
57
57
58 Currently known records:
58 Currently known records:
59
59
60 L: the node of the "local" part of the merge (hexified version)
60 L: the node of the "local" part of the merge (hexified version)
61 O: the node of the "other" part of the merge (hexified version)
61 O: the node of the "other" part of the merge (hexified version)
62 F: a file to be merged entry
62 F: a file to be merged entry
63 C: a change/delete or delete/change conflict
63 C: a change/delete or delete/change conflict
64 D: a file that the external merge driver will merge internally
64 D: a file that the external merge driver will merge internally
65 (experimental)
65 (experimental)
66 m: the external merge driver defined for this merge plus its run state
66 m: the external merge driver defined for this merge plus its run state
67 (experimental)
67 (experimental)
68 f: a (filename, dictionary) tuple of optional values for a given file
68 f: a (filename, dictionary) tuple of optional values for a given file
69 X: unsupported mandatory record type (used in tests)
69 X: unsupported mandatory record type (used in tests)
70 x: unsupported advisory record type (used in tests)
70 x: unsupported advisory record type (used in tests)
71 l: the labels for the parts of the merge.
71 l: the labels for the parts of the merge.
72
72
73 Merge driver run states (experimental):
73 Merge driver run states (experimental):
74 u: driver-resolved files unmarked -- needs to be run next time we're about
74 u: driver-resolved files unmarked -- needs to be run next time we're about
75 to resolve or commit
75 to resolve or commit
76 m: driver-resolved files marked -- only needs to be run before commit
76 m: driver-resolved files marked -- only needs to be run before commit
77 s: success/skipped -- does not need to be run any more
77 s: success/skipped -- does not need to be run any more
78
78
79 '''
79 '''
80 statepathv1 = 'merge/state'
80 statepathv1 = 'merge/state'
81 statepathv2 = 'merge/state2'
81 statepathv2 = 'merge/state2'
82
82
83 @staticmethod
83 @staticmethod
84 def clean(repo, node=None, other=None, labels=None):
84 def clean(repo, node=None, other=None, labels=None):
85 """Initialize a brand new merge state, removing any existing state on
85 """Initialize a brand new merge state, removing any existing state on
86 disk."""
86 disk."""
87 ms = mergestate(repo)
87 ms = mergestate(repo)
88 ms.reset(node, other, labels)
88 ms.reset(node, other, labels)
89 return ms
89 return ms
90
90
91 @staticmethod
91 @staticmethod
92 def read(repo):
92 def read(repo):
93 """Initialize the merge state, reading it from disk."""
93 """Initialize the merge state, reading it from disk."""
94 ms = mergestate(repo)
94 ms = mergestate(repo)
95 ms._read()
95 ms._read()
96 return ms
96 return ms
97
97
98 def __init__(self, repo):
98 def __init__(self, repo):
99 """Initialize the merge state.
99 """Initialize the merge state.
100
100
101 Do not use this directly! Instead call read() or clean()."""
101 Do not use this directly! Instead call read() or clean()."""
102 self._repo = repo
102 self._repo = repo
103 self._dirty = False
103 self._dirty = False
104 self._labels = None
104 self._labels = None
105
105
106 def reset(self, node=None, other=None, labels=None):
106 def reset(self, node=None, other=None, labels=None):
107 self._state = {}
107 self._state = {}
108 self._stateextras = {}
108 self._stateextras = {}
109 self._local = None
109 self._local = None
110 self._other = None
110 self._other = None
111 self._labels = labels
111 self._labels = labels
112 for var in ('localctx', 'otherctx'):
112 for var in ('localctx', 'otherctx'):
113 if var in vars(self):
113 if var in vars(self):
114 delattr(self, var)
114 delattr(self, var)
115 if node:
115 if node:
116 self._local = node
116 self._local = node
117 self._other = other
117 self._other = other
118 self._readmergedriver = None
118 self._readmergedriver = None
119 if self.mergedriver:
119 if self.mergedriver:
120 self._mdstate = 's'
120 self._mdstate = 's'
121 else:
121 else:
122 self._mdstate = 'u'
122 self._mdstate = 'u'
123 shutil.rmtree(self._repo.join('merge'), True)
123 shutil.rmtree(self._repo.join('merge'), True)
124 self._results = {}
124 self._results = {}
125 self._dirty = False
125 self._dirty = False
126
126
127 def _read(self):
127 def _read(self):
128 """Analyse each record content to restore a serialized state from disk
128 """Analyse each record content to restore a serialized state from disk
129
129
130 This function processes "record" entries produced by the de-serialization
130 This function processes "record" entries produced by the de-serialization
131 of the on-disk file.
131 of the on-disk file.
132 """
132 """
133 self._state = {}
133 self._state = {}
134 self._stateextras = {}
134 self._stateextras = {}
135 self._local = None
135 self._local = None
136 self._other = None
136 self._other = None
137 for var in ('localctx', 'otherctx'):
137 for var in ('localctx', 'otherctx'):
138 if var in vars(self):
138 if var in vars(self):
139 delattr(self, var)
139 delattr(self, var)
140 self._readmergedriver = None
140 self._readmergedriver = None
141 self._mdstate = 's'
141 self._mdstate = 's'
142 unsupported = set()
142 unsupported = set()
143 records = self._readrecords()
143 records = self._readrecords()
144 for rtype, record in records:
144 for rtype, record in records:
145 if rtype == 'L':
145 if rtype == 'L':
146 self._local = bin(record)
146 self._local = bin(record)
147 elif rtype == 'O':
147 elif rtype == 'O':
148 self._other = bin(record)
148 self._other = bin(record)
149 elif rtype == 'm':
149 elif rtype == 'm':
150 bits = record.split('\0', 1)
150 bits = record.split('\0', 1)
151 mdstate = bits[1]
151 mdstate = bits[1]
152 if len(mdstate) != 1 or mdstate not in 'ums':
152 if len(mdstate) != 1 or mdstate not in 'ums':
153 # the merge driver should be idempotent, so just rerun it
153 # the merge driver should be idempotent, so just rerun it
154 mdstate = 'u'
154 mdstate = 'u'
155
155
156 self._readmergedriver = bits[0]
156 self._readmergedriver = bits[0]
157 self._mdstate = mdstate
157 self._mdstate = mdstate
158 elif rtype in 'FDC':
158 elif rtype in 'FDC':
159 bits = record.split('\0')
159 bits = record.split('\0')
160 self._state[bits[0]] = bits[1:]
160 self._state[bits[0]] = bits[1:]
161 elif rtype == 'f':
161 elif rtype == 'f':
162 filename, rawextras = record.split('\0', 1)
162 filename, rawextras = record.split('\0', 1)
163 extraparts = rawextras.split('\0')
163 extraparts = rawextras.split('\0')
164 extras = {}
164 extras = {}
165 i = 0
165 i = 0
166 while i < len(extraparts):
166 while i < len(extraparts):
167 extras[extraparts[i]] = extraparts[i + 1]
167 extras[extraparts[i]] = extraparts[i + 1]
168 i += 2
168 i += 2
169
169
170 self._stateextras[filename] = extras
170 self._stateextras[filename] = extras
171 elif rtype == 'l':
171 elif rtype == 'l':
172 labels = record.split('\0', 2)
172 labels = record.split('\0', 2)
173 self._labels = [l for l in labels if len(l) > 0]
173 self._labels = [l for l in labels if len(l) > 0]
174 elif not rtype.islower():
174 elif not rtype.islower():
175 unsupported.add(rtype)
175 unsupported.add(rtype)
176 self._results = {}
176 self._results = {}
177 self._dirty = False
177 self._dirty = False
178
178
179 if unsupported:
179 if unsupported:
180 raise error.UnsupportedMergeRecords(unsupported)
180 raise error.UnsupportedMergeRecords(unsupported)
181
181
182 def _readrecords(self):
182 def _readrecords(self):
183 """Read merge state from disk and return a list of record (TYPE, data)
183 """Read merge state from disk and return a list of record (TYPE, data)
184
184
185 We read data from both v1 and v2 files and decide which one to use.
185 We read data from both v1 and v2 files and decide which one to use.
186
186
187 V1 was used by versions prior to 2.9.1 and contains less data than
187 V1 was used by versions prior to 2.9.1 and contains less data than
188 v2. We read both versions and check that no data in v2 contradicts
188 v2. We read both versions and check that no data in v2 contradicts
189 v1. If there is no contradiction we can safely assume that both v1
189 v1. If there is no contradiction we can safely assume that both v1
190 and v2 were written at the same time and use the extra data in v2. If
190 and v2 were written at the same time and use the extra data in v2. If
191 there is a contradiction we ignore the v2 content, as we assume an old
191 there is a contradiction we ignore the v2 content, as we assume an old
192 version of Mercurial has overwritten the mergestate file and left an
192 version of Mercurial has overwritten the mergestate file and left an
193 old v2 file around.
193 old v2 file around.
194
194
195 returns list of record [(TYPE, data), ...]"""
195 returns list of record [(TYPE, data), ...]"""
196 v1records = self._readrecordsv1()
196 v1records = self._readrecordsv1()
197 v2records = self._readrecordsv2()
197 v2records = self._readrecordsv2()
198 if self._v1v2match(v1records, v2records):
198 if self._v1v2match(v1records, v2records):
199 return v2records
199 return v2records
200 else:
200 else:
201 # v1 file is newer than v2 file, use it
201 # v1 file is newer than v2 file, use it
202 # we have to infer the "other" changeset of the merge
202 # we have to infer the "other" changeset of the merge
203 # we cannot do better than that with v1 of the format
203 # we cannot do better than that with v1 of the format
204 mctx = self._repo[None].parents()[-1]
204 mctx = self._repo[None].parents()[-1]
205 v1records.append(('O', mctx.hex()))
205 v1records.append(('O', mctx.hex()))
206 # add placeholder "other" file node information
206 # add placeholder "other" file node information
207 # nobody is using it yet so we do not need to fetch the data
207 # nobody is using it yet so we do not need to fetch the data
208 # if mctx was wrong `mctx[bits[-2]]` may fail.
208 # if mctx was wrong `mctx[bits[-2]]` may fail.
209 for idx, r in enumerate(v1records):
209 for idx, r in enumerate(v1records):
210 if r[0] == 'F':
210 if r[0] == 'F':
211 bits = r[1].split('\0')
211 bits = r[1].split('\0')
212 bits.insert(-2, '')
212 bits.insert(-2, '')
213 v1records[idx] = (r[0], '\0'.join(bits))
213 v1records[idx] = (r[0], '\0'.join(bits))
214 return v1records
214 return v1records
215
215
216 def _v1v2match(self, v1records, v2records):
216 def _v1v2match(self, v1records, v2records):
217 oldv2 = set() # old format version of v2 record
217 oldv2 = set() # old format version of v2 record
218 for rec in v2records:
218 for rec in v2records:
219 if rec[0] == 'L':
219 if rec[0] == 'L':
220 oldv2.add(rec)
220 oldv2.add(rec)
221 elif rec[0] == 'F':
221 elif rec[0] == 'F':
222 # drop the onode data (not contained in v1)
222 # drop the onode data (not contained in v1)
223 oldv2.add(('F', _droponode(rec[1])))
223 oldv2.add(('F', _droponode(rec[1])))
224 for rec in v1records:
224 for rec in v1records:
225 if rec not in oldv2:
225 if rec not in oldv2:
226 return False
226 return False
227 else:
227 else:
228 return True
228 return True
229
229
230 def _readrecordsv1(self):
230 def _readrecordsv1(self):
231 """read on disk merge state for version 1 file
231 """read on disk merge state for version 1 file
232
232
233 returns list of record [(TYPE, data), ...]
233 returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

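    # A minimal sketch of the v2 framing described above (hypothetical
    # values; this assumes _pack/_unpack are this module's struct.pack and
    # struct.unpack aliases, as their use here suggests):
    #
    #     rec = _pack('>sI7s', 'X', 7, 'payload')  # type + 4-byte length + data
    #     rtype = rec[0]                           # 'X'
    #     length = _unpack('>I', rec[1:5])[0]      # 7
    #     content = rec[5:5 + length]              # 'payload'
    #
    # On write, a record type outside the 'LOF' whitelist is wrapped as
    # 't' + type + data (see _writerecordsv2); the `if rtype == 't'` branch
    # above unwraps it again.
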
    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            raise RuntimeError("localctx accessed but self._local isn't set")
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            raise RuntimeError("otherctx accessed but self._other isn't set")
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        for d, v in self._state.iteritems():
            if v[0] == 'd':
                records.append(('D', '\0'.join([d] + v)))
            # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
            # older versions of Mercurial
            elif v[1] == nullhex or v[6] == nullhex:
                records.append(('C', '\0'.join([d] + v)))
            else:
                records.append(('F', '\0'.join([d] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

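    # Record types emitted above, for quick reference (a summary of this
    # method's logic, not a full format spec): 'L' and 'O' are the local and
    # other nodes, 'm' the merge driver and its state, 'D' driver-resolved
    # files, 'C' change/delete conflicts (one side is nullhex), 'F' ordinary
    # file merge records, 'f' per-file extras, 'l' the conflict marker labels.
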
    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

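    # The resulting v1 file is plain text: the first line is the hex node of
    # the local parent, followed by one NUL-separated line per 'F' record,
    # minus the "other file node" field that v1 cannot represent (see the
    # note in _readrecordsv1).
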
    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            hash = util.sha1(fcl.path()).hexdigest()
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                self._repo.wwrite(dfile, f.read(), flags)
                f.close()
            else:
                self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, self._local,
                                                      lfile, fcd, fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, self._local,
                                                       lfile, fcd, fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

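    # A sketch of how callers typically drive the two phases above
    # (hypothetical loop; `ms` is a mergestate, `f` an unresolved path,
    # `wctx` the working context):
    #
    #     complete, r = ms.preresolve(f, wctx)
    #     if not complete:
    #         r = ms.resolve(f, wctx)
    #     ms.commit()      # persist any state changes
    #     if r:
    #         pass         # nonzero exit code: f is still unresolved
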
    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len([True for f, entry in self._state.iteritems()
                    if entry[0] == 'u'])

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'

def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name, default='abort')
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config

def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))

def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)

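# For illustration, the two knobs read above live in the [merge] section of
# hgrc; a hypothetical configuration (not part of this module) would be:
#
#     [merge]
#     checkunknown = warn
#     checkignored = ignore
#
# Each accepts 'abort' (the default), 'ignore', or 'warn'; anything else is
# rejected by _getcheckunknownconfig.
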
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = 'f'
    if branchmerge:
        m = 'r'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions

def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

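# For example, where util.normcase folds case (as on Windows or macOS), a
# merge that would produce both 'README' and 'readme' maps both names to one
# foldmap key, so the first loop above aborts; the prefix walk that follows
# catches the analogous clash between a file and a directory that differ
# only by case.
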
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(matcher)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] += '+'

    # Compare manifests
    if matcher is not None:
        m1 = m1.matches(matcher)
        m2 = m2.matches(matcher)
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete

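# For reference, the returned `actions` maps each path to a tuple of
# (action type, action args, human-readable message); hypothetical entries
# matching the shapes built above:
#
#     {'a.txt': ('g', (fl2, False), "remote is newer"),
#      'b.txt': ('m', ('b.txt', 'b.txt', 'b.txt', False, ancestornode),
#                "versions differ")}
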
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""

    for f, (m, args, msg) in actions.items():
        if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = 'r', None, "prompt same"
        elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted

def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    "Calculate the actions needed to merge mctx into wctx using ancestors"
    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete1) < len(renamedelete):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete

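# A sketch of the auction input assembled above (hypothetical values): with
# two ancestors bidding on one file, fbids could be
#
#     {'a.txt': {'k': [('k', (), "remote unchanged")],
#                'g': [('g', (fl, False), "remote is newer")]}}
#
# in which case the 'keep' bid wins under the rules above.
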
def batchremove(repo, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        audit(f)
        try:
            unlink(wjoin(f), ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    wwrite = repo.wwrite
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    # TODO Mercurial has always aborted if an untracked
                    # directory is replaced by a tracked file, or generally
                    # with file/directory merges. This needs to be sorted out.
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise

            wwrite(f, fctx(f).data(), flags, backgroundclose=True)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

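# The i == 100 batching above exists so these generators, run in worker
# subprocesses, report progress at a reasonable granularity: roughly every
# hundred files they yield a (count, last filename) tuple. A minimal sketch
# of how such a generator is driven, mirroring the loops in applyupdates
# below (removeactions standing in for actions['r']):
#
#     prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), removeactions)
#     done = 0
#     for i, item in prog:
#         done += i
#         repo.ui.progress(_('updating'), done, item=item, unit=_('files'))
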
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
1201 f0, flags = args
1202 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1202 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1203 audit(f)
1203 audit(f)
1204 repo.wwrite(f, wctx.filectx(f0).data(), flags)
1204 repo.wwrite(f, wctx.filectx(f0).data(), flags)
1205 util.unlinkpath(repo.wjoin(f0))
1205 util.unlinkpath(repo.wjoin(f0))
1206 updated += 1
1206 updated += 1
1207
1207
1208 # local directory rename, get
1208 # local directory rename, get
1209 for f, args, msg in actions['dg']:
1209 for f, args, msg in actions['dg']:
1210 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1210 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1211 z += 1
1211 z += 1
1212 progress(_updating, z, item=f, total=numupdates, unit=_files)
1212 progress(_updating, z, item=f, total=numupdates, unit=_files)
1213 f0, flags = args
1213 f0, flags = args
1214 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1214 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1215 repo.wwrite(f, mctx.filectx(f0).data(), flags)
1215 repo.wwrite(f, mctx.filectx(f0).data(), flags)
1216 updated += 1
1216 updated += 1
1217
1217
1218 # exec
1218 # exec
1219 for f, args, msg in actions['e']:
1219 for f, args, msg in actions['e']:
1220 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1220 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1221 z += 1
1221 z += 1
1222 progress(_updating, z, item=f, total=numupdates, unit=_files)
1222 progress(_updating, z, item=f, total=numupdates, unit=_files)
1223 flags, = args
1223 flags, = args
1224 audit(f)
1224 audit(f)
1225 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
1225 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
1226 updated += 1
1226 updated += 1
1227
1227
1228 # the ordering is important here -- ms.mergedriver will raise if the merge
1228 # the ordering is important here -- ms.mergedriver will raise if the merge
1229 # driver has changed, and we want to be able to bypass it when overwrite is
1229 # driver has changed, and we want to be able to bypass it when overwrite is
1230 # True
1230 # True
1231 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1231 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1232
1232
1233 if usemergedriver:
1233 if usemergedriver:
1234 ms.commit()
1234 ms.commit()
1235 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1235 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1236 # the driver might leave some files unresolved
1236 # the driver might leave some files unresolved
1237 unresolvedf = set(ms.unresolved())
1237 unresolvedf = set(ms.unresolved())
1238 if not proceed:
1238 if not proceed:
1239 # XXX setting unresolved to at least 1 is a hack to make sure we
1239 # XXX setting unresolved to at least 1 is a hack to make sure we
1240 # error out
1240 # error out
1241 return updated, merged, removed, max(len(unresolvedf), 1)
1241 return updated, merged, removed, max(len(unresolvedf), 1)
1242 newactions = []
1242 newactions = []
1243 for f, args, msg in mergeactions:
1243 for f, args, msg in mergeactions:
1244 if f in unresolvedf:
1244 if f in unresolvedf:
1245 newactions.append((f, args, msg))
1245 newactions.append((f, args, msg))
1246 mergeactions = newactions
1246 mergeactions = newactions
1247
1247
1248 # premerge
1248 # premerge
1249 tocomplete = []
1249 tocomplete = []
1250 for f, args, msg in mergeactions:
1250 for f, args, msg in mergeactions:
1251 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1251 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1252 z += 1
1252 z += 1
1253 progress(_updating, z, item=f, total=numupdates, unit=_files)
1253 progress(_updating, z, item=f, total=numupdates, unit=_files)
1254 if f == '.hgsubstate': # subrepo states need updating
1254 if f == '.hgsubstate': # subrepo states need updating
1255 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1255 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1256 overwrite)
1256 overwrite)
1257 continue
1257 continue
1258 audit(f)
1258 audit(f)
1259 complete, r = ms.preresolve(f, wctx)
1259 complete, r = ms.preresolve(f, wctx)
1260 if not complete:
1260 if not complete:
1261 numupdates += 1
1261 numupdates += 1
1262 tocomplete.append((f, args, msg))
1262 tocomplete.append((f, args, msg))
1263
1263
1264 # merge
1264 # merge
1265 for f, args, msg in tocomplete:
1265 for f, args, msg in tocomplete:
1266 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1266 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1267 z += 1
1267 z += 1
1268 progress(_updating, z, item=f, total=numupdates, unit=_files)
1268 progress(_updating, z, item=f, total=numupdates, unit=_files)
1269 ms.resolve(f, wctx)
1269 ms.resolve(f, wctx)
1270
1270
1271 ms.commit()
1271 ms.commit()
1272
1272
1273 unresolved = ms.unresolvedcount()
1273 unresolved = ms.unresolvedcount()
1274
1274
1275 if usemergedriver and not unresolved and ms.mdstate() != 's':
1275 if usemergedriver and not unresolved and ms.mdstate() != 's':
1276 if not driverconclude(repo, ms, wctx, labels=labels):
1276 if not driverconclude(repo, ms, wctx, labels=labels):
1277 # XXX setting unresolved to at least 1 is a hack to make sure we
1277 # XXX setting unresolved to at least 1 is a hack to make sure we
1278 # error out
1278 # error out
1279 unresolved = max(unresolved, 1)
1279 unresolved = max(unresolved, 1)
1280
1280
1281 ms.commit()
1281 ms.commit()
1282
1282
1283 msupdated, msmerged, msremoved = ms.counts()
1283 msupdated, msmerged, msremoved = ms.counts()
1284 updated += msupdated
1284 updated += msupdated
1285 merged += msmerged
1285 merged += msmerged
1286 removed += msremoved
1286 removed += msremoved
1287
1287
1288 extraactions = ms.actions()
1288 extraactions = ms.actions()
1289 for k, acts in extraactions.iteritems():
1289 for k, acts in extraactions.iteritems():
1290 actions[k].extend(acts)
1290 actions[k].extend(acts)
1291
1291
1292 progress(_updating, None, total=numupdates, unit=_files)
1292 progress(_updating, None, total=numupdates, unit=_files)
1293
1293
1294 return updated, merged, removed, unresolved
1294 return updated, merged, removed, unresolved
1295
1295
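# A minimal sketch (illustrative values) of the 'actions' mapping that
# applyupdates() consumes: action code -> list of (file, args, message).
# Codes seen above: 'g' get, 'r' remove, 'a'/'am' re-add, 'f' forget,
# 'k' keep, 'm' merge, 'cd'/'dc' change/delete conflicts, 'dm'/'dg'
# directory renames, 'e' exec-flag change.
actions = {
    'r': [('old.txt', None, "other deleted")],
    'g': [('new.txt', ('', False), "remote created")],
    'k': [('same.txt', None, "keep")],
}
# 'k' entries report no progress, matching the numupdates sum above:
numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
assert numupdates == 2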
1296 def recordupdates(repo, actions, branchmerge):
1296 def recordupdates(repo, actions, branchmerge):
1297 "record merge actions to the dirstate"
1297 "record merge actions to the dirstate"
1298 # remove (must come first)
1298 # remove (must come first)
1299 for f, args, msg in actions.get('r', []):
1299 for f, args, msg in actions.get('r', []):
1300 if branchmerge:
1300 if branchmerge:
1301 repo.dirstate.remove(f)
1301 repo.dirstate.remove(f)
1302 else:
1302 else:
1303 repo.dirstate.drop(f)
1303 repo.dirstate.drop(f)
1304
1304
1305 # forget (must come first)
1305 # forget (must come first)
1306 for f, args, msg in actions.get('f', []):
1306 for f, args, msg in actions.get('f', []):
1307 repo.dirstate.drop(f)
1307 repo.dirstate.drop(f)
1308
1308
1309 # re-add
1309 # re-add
1310 for f, args, msg in actions.get('a', []):
1310 for f, args, msg in actions.get('a', []):
1311 repo.dirstate.add(f)
1311 repo.dirstate.add(f)
1312
1312
1313 # re-add/mark as modified
1313 # re-add/mark as modified
1314 for f, args, msg in actions.get('am', []):
1314 for f, args, msg in actions.get('am', []):
1315 if branchmerge:
1315 if branchmerge:
1316 repo.dirstate.normallookup(f)
1316 repo.dirstate.normallookup(f)
1317 else:
1317 else:
1318 repo.dirstate.add(f)
1318 repo.dirstate.add(f)
1319
1319
1320 # exec change
1320 # exec change
1321 for f, args, msg in actions.get('e', []):
1321 for f, args, msg in actions.get('e', []):
1322 repo.dirstate.normallookup(f)
1322 repo.dirstate.normallookup(f)
1323
1323
1324 # keep
1324 # keep
1325 for f, args, msg in actions.get('k', []):
1325 for f, args, msg in actions.get('k', []):
1326 pass
1326 pass
1327
1327
1328 # get
1328 # get
1329 for f, args, msg in actions.get('g', []):
1329 for f, args, msg in actions.get('g', []):
1330 if branchmerge:
1330 if branchmerge:
1331 repo.dirstate.otherparent(f)
1331 repo.dirstate.otherparent(f)
1332 else:
1332 else:
1333 repo.dirstate.normal(f)
1333 repo.dirstate.normal(f)
1334
1334
1335 # merge
1335 # merge
1336 for f, args, msg in actions.get('m', []):
1336 for f, args, msg in actions.get('m', []):
1337 f1, f2, fa, move, anc = args
1337 f1, f2, fa, move, anc = args
1338 if branchmerge:
1338 if branchmerge:
1339 # We've done a branch merge, mark this file as merged
1339 # We've done a branch merge, mark this file as merged
1340 # so that we properly record the merger later
1340 # so that we properly record the merger later
1341 repo.dirstate.merge(f)
1341 repo.dirstate.merge(f)
1342 if f1 != f2: # copy/rename
1342 if f1 != f2: # copy/rename
1343 if move:
1343 if move:
1344 repo.dirstate.remove(f1)
1344 repo.dirstate.remove(f1)
1345 if f1 != f:
1345 if f1 != f:
1346 repo.dirstate.copy(f1, f)
1346 repo.dirstate.copy(f1, f)
1347 else:
1347 else:
1348 repo.dirstate.copy(f2, f)
1348 repo.dirstate.copy(f2, f)
1349 else:
1349 else:
1350 # We've update-merged a locally modified file, so
1350 # We've update-merged a locally modified file, so
1351 # we set the dirstate to emulate a normal checkout
1351 # we set the dirstate to emulate a normal checkout
1352 # of that file some time in the past. Thus our
1352 # of that file some time in the past. Thus our
1353 # merge will appear as a normal local file
1353 # merge will appear as a normal local file
1354 # modification.
1354 # modification.
1355 if f2 == f: # file not locally copied/moved
1355 if f2 == f: # file not locally copied/moved
1356 repo.dirstate.normallookup(f)
1356 repo.dirstate.normallookup(f)
1357 if move:
1357 if move:
1358 repo.dirstate.drop(f1)
1358 repo.dirstate.drop(f1)
1359
1359
1360 # directory rename, move local
1360 # directory rename, move local
1361 for f, args, msg in actions.get('dm', []):
1361 for f, args, msg in actions.get('dm', []):
1362 f0, flag = args
1362 f0, flag = args
1363 if branchmerge:
1363 if branchmerge:
1364 repo.dirstate.add(f)
1364 repo.dirstate.add(f)
1365 repo.dirstate.remove(f0)
1365 repo.dirstate.remove(f0)
1366 repo.dirstate.copy(f0, f)
1366 repo.dirstate.copy(f0, f)
1367 else:
1367 else:
1368 repo.dirstate.normal(f)
1368 repo.dirstate.normal(f)
1369 repo.dirstate.drop(f0)
1369 repo.dirstate.drop(f0)
1370
1370
1371 # directory rename, get
1371 # directory rename, get
1372 for f, args, msg in actions.get('dg', []):
1372 for f, args, msg in actions.get('dg', []):
1373 f0, flag = args
1373 f0, flag = args
1374 if branchmerge:
1374 if branchmerge:
1375 repo.dirstate.add(f)
1375 repo.dirstate.add(f)
1376 repo.dirstate.copy(f0, f)
1376 repo.dirstate.copy(f0, f)
1377 else:
1377 else:
1378 repo.dirstate.normal(f)
1378 repo.dirstate.normal(f)
1379
1379
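# A compressed restatement of the 'g' (get) case above, making the two
# dirstate transitions explicit ('dirstate' stands in for repo.dirstate):
def record_get(dirstate, f, branchmerge):
    if branchmerge:
        dirstate.otherparent(f)  # content came from the second parent
    else:
        dirstate.normal(f)       # plain update: the file is clean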
1380 def update(repo, node, branchmerge, force, ancestor=None,
1380 def update(repo, node, branchmerge, force, ancestor=None,
1381 mergeancestor=False, labels=None, matcher=None, mergeforce=False):
1381 mergeancestor=False, labels=None, matcher=None, mergeforce=False):
1382 """
1382 """
1383 Perform a merge between the working directory and the given node
1383 Perform a merge between the working directory and the given node
1384
1384
1385 node = the node to update to, or None if unspecified
1385 node = the node to update to, or None if unspecified
1386 branchmerge = whether to merge between branches
1386 branchmerge = whether to merge between branches
1387 force = whether to force branch merging or file overwriting
1387 force = whether to force branch merging or file overwriting
1388 matcher = a matcher to filter file lists (dirstate not updated)
1388 matcher = a matcher to filter file lists (dirstate not updated)
1389 mergeancestor = whether it is merging with an ancestor. If true,
1389 mergeancestor = whether it is merging with an ancestor. If true,
1390 we should accept the incoming changes for any prompts that occur.
1390 we should accept the incoming changes for any prompts that occur.
1391 If false, merging with an ancestor (fast-forward) is only allowed
1391 If false, merging with an ancestor (fast-forward) is only allowed
1392 between different named branches. This flag is used by the rebase
1392 between different named branches. This flag is used by the rebase
1393 extension as a temporary fix and should be avoided in general.
1393 extension as a temporary fix and should be avoided in general.
1394 labels = labels to use for base, local and other
1394 labels = labels to use for base, local and other
1395 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1395 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1396 this is True, then 'force' should be True as well.
1396 this is True, then 'force' should be True as well.
1397
1397
1398 The table below shows all the behaviors of the update command
1398 The table below shows all the behaviors of the update command
1399 given the -c and -C or no options, whether the working directory
1399 given the -c and -C or no options, whether the working directory
1400 is dirty, whether a revision is specified, and the relationship of
1400 is dirty, whether a revision is specified, and the relationship of
1401 the parent rev to the target rev (linear, on the same named
1401 the parent rev to the target rev (linear, on the same named
1402 branch, or on another named branch).
1402 branch, or on another named branch).
1403
1403
1404 This logic is tested by test-update-branches.t.
1404 This logic is tested by test-update-branches.t.
1405
1405
1406 -c -C dirty rev | linear same cross
1406 -c -C dirty rev | linear same cross
1407 n n n n | ok (1) x
1407 n n n n | ok (1) x
1408 n n n y | ok ok ok
1408 n n n y | ok ok ok
1409 n n y n | merge (2) (2)
1409 n n y n | merge (2) (2)
1410 n n y y | merge (3) (3)
1410 n n y y | merge (3) (3)
1411 n y * * | discard discard discard
1411 n y * * | discard discard discard
1412 y n y * | (4) (4) (4)
1412 y n y * | (4) (4) (4)
1413 y n n * | ok ok ok
1413 y n n * | ok ok ok
1414 y y * * | (5) (5) (5)
1414 y y * * | (5) (5) (5)
1415
1415
1416 x = can't happen
1416 x = can't happen
1417 * = don't-care
1417 * = don't-care
1418 1 = abort: not a linear update (merge or update --check to force update)
1418 1 = abort: not a linear update (merge or update --check to force update)
1419 2 = abort: uncommitted changes (commit and merge, or update --clean to
1419 2 = abort: uncommitted changes (commit and merge, or update --clean to
1420 discard changes)
1420 discard changes)
1421 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1421 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1422 4 = abort: uncommitted changes (checked in commands.py)
1422 4 = abort: uncommitted changes (checked in commands.py)
1423 5 = incompatible options (checked in commands.py)
1423 5 = incompatible options (checked in commands.py)
1424
1424
1425 Return the same tuple as applyupdates().
1425 Return the same tuple as applyupdates().
1426 """
1426 """
1427
1427
1428 onode = node
1428 onode = node
1429 # If we're doing a partial update, we need to skip updating
1429 # If we're doing a partial update, we need to skip updating
1430 # the dirstate, so make a note of any partial-ness to the
1430 # the dirstate, so make a note of any partial-ness to the
1431 # update here.
1431 # update here.
1432 if matcher is None or matcher.always():
1432 if matcher is None or matcher.always():
1433 partial = False
1433 partial = False
1434 else:
1434 else:
1435 partial = True
1435 partial = True
1436 with repo.wlock():
1436 with repo.wlock():
1437 wc = repo[None]
1437 wc = repo[None]
1438 pl = wc.parents()
1438 pl = wc.parents()
1439 p1 = pl[0]
1439 p1 = pl[0]
1440 pas = [None]
1440 pas = [None]
1441 if ancestor is not None:
1441 if ancestor is not None:
1442 pas = [repo[ancestor]]
1442 pas = [repo[ancestor]]
1443
1443
1444 if node is None:
1444 if node is None:
1445 repo.ui.deprecwarn('update with no target', '3.9')
1445 repo.ui.deprecwarn('update with no target', '3.9')
1446 rev, _mark, _act = destutil.destupdate(repo)
1446 rev, _mark, _act = destutil.destupdate(repo)
1447 node = repo[rev].node()
1447 node = repo[rev].node()
1448
1448
1449 overwrite = force and not branchmerge
1449 overwrite = force and not branchmerge
1450
1450
1451 p2 = repo[node]
1451 p2 = repo[node]
1452 if pas[0] is None:
1452 if pas[0] is None:
1453 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1453 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1454 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1454 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1455 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1455 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1456 else:
1456 else:
1457 pas = [p1.ancestor(p2, warn=branchmerge)]
1457 pas = [p1.ancestor(p2, warn=branchmerge)]
1458
1458
1459 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1459 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1460
1460
1461 ### check phase
1461 ### check phase
1462 if not overwrite:
1462 if not overwrite:
1463 if len(pl) > 1:
1463 if len(pl) > 1:
1464 raise error.Abort(_("outstanding uncommitted merge"))
1464 raise error.Abort(_("outstanding uncommitted merge"))
1465 ms = mergestate.read(repo)
1465 ms = mergestate.read(repo)
1466 if list(ms.unresolved()):
1466 if list(ms.unresolved()):
1467 raise error.Abort(_("outstanding merge conflicts"))
1467 raise error.Abort(_("outstanding merge conflicts"))
1468 if branchmerge:
1468 if branchmerge:
1469 if pas == [p2]:
1469 if pas == [p2]:
1470 raise error.Abort(_("merging with a working directory ancestor"
1470 raise error.Abort(_("merging with a working directory ancestor"
1471 " has no effect"))
1471 " has no effect"))
1472 elif pas == [p1]:
1472 elif pas == [p1]:
1473 if not mergeancestor and p1.branch() == p2.branch():
1473 if not mergeancestor and p1.branch() == p2.branch():
1474 raise error.Abort(_("nothing to merge"),
1474 raise error.Abort(_("nothing to merge"),
1475 hint=_("use 'hg update' "
1475 hint=_("use 'hg update' "
1476 "or check 'hg heads'"))
1476 "or check 'hg heads'"))
1477 if not force and (wc.files() or wc.deleted()):
1477 if not force and (wc.files() or wc.deleted()):
1478 raise error.Abort(_("uncommitted changes"),
1478 raise error.Abort(_("uncommitted changes"),
1479 hint=_("use 'hg status' to list changes"))
1479 hint=_("use 'hg status' to list changes"))
1480 for s in sorted(wc.substate):
1480 for s in sorted(wc.substate):
1481 wc.sub(s).bailifchanged()
1481 wc.sub(s).bailifchanged()
1482
1482
1483 elif not overwrite:
1483 elif not overwrite:
1484 if p1 == p2: # no-op update
1484 if p1 == p2: # no-op update
1485 # call the hooks and exit early
1485 # call the hooks and exit early
1486 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1486 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1487 repo.hook('update', parent1=xp2, parent2='', error=0)
1487 repo.hook('update', parent1=xp2, parent2='', error=0)
1488 return 0, 0, 0, 0
1488 return 0, 0, 0, 0
1489
1489
1490 if pas not in ([p1], [p2]): # nonlinear
1490 if pas not in ([p1], [p2]): # nonlinear
1491 dirty = wc.dirty(missing=True)
1491 dirty = wc.dirty(missing=True)
1492 if dirty or onode is None:
1492 if dirty or onode is None:
1493 # Branching is a bit strange to ensure we do the minimal
1493 # Branching is a bit strange to ensure we do the minimal
1494 # number of calls to obsolete.foreground.
1494 # number of calls to obsolete.foreground.
1495 foreground = obsolete.foreground(repo, [p1.node()])
1495 foreground = obsolete.foreground(repo, [p1.node()])
1496 # note: the <node> variable contains a random identifier
1496 # note: the <node> variable contains a random identifier
1497 if repo[node].node() in foreground:
1497 if repo[node].node() in foreground:
1498 pas = [p1] # allow updating to successors
1498 pas = [p1] # allow updating to successors
1499 elif dirty:
1499 elif dirty:
1500 msg = _("uncommitted changes")
1500 msg = _("uncommitted changes")
1501 if onode is None:
1501 if onode is None:
1502 hint = _("commit and merge, or update --clean to"
1502 hint = _("commit and merge, or update --clean to"
1503 " discard changes")
1503 " discard changes")
1504 else:
1504 else:
1505 hint = _("commit or update --clean to discard"
1505 hint = _("commit or update --clean to discard"
1506 " changes")
1506 " changes")
1507 raise error.Abort(msg, hint=hint)
1507 raise error.Abort(msg, hint=hint)
1508 else: # node is none
1508 else: # node is none
1509 msg = _("not a linear update")
1509 msg = _("not a linear update")
1510 hint = _("merge or update --check to force update")
1510 hint = _("merge or update --check to force update")
1511 raise error.Abort(msg, hint=hint)
1511 raise error.Abort(msg, hint=hint)
1512 else:
1512 else:
1513 # Allow jumping branches if clean and specific rev given
1513 # Allow jumping branches if clean and specific rev given
1514 pas = [p1]
1514 pas = [p1]
1515
1515
1516 # deprecated config: merge.followcopies
1516 # deprecated config: merge.followcopies
1517 followcopies = False
1517 followcopies = False
1518 if overwrite:
1518 if overwrite:
1519 pas = [wc]
1519 pas = [wc]
1520 elif pas == [p2]: # backwards
1520 elif pas == [p2]: # backwards
1521 pas = [wc.p1()]
1521 pas = [wc.p1()]
1522 elif not branchmerge and not wc.dirty(missing=True):
1522 elif not branchmerge and not wc.dirty(missing=True):
1523 pass
1523 pass
1524 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1524 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1525 followcopies = True
1525 followcopies = True
1526
1526
1527 ### calculate phase
1527 ### calculate phase
1528 actionbyfile, diverge, renamedelete = calculateupdates(
1528 actionbyfile, diverge, renamedelete = calculateupdates(
1529 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1529 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1530 followcopies, matcher=matcher, mergeforce=mergeforce)
1530 followcopies, matcher=matcher, mergeforce=mergeforce)
1531
1531
1532 # Prompt and create actions. Most of this is in the resolve phase
1532 # Prompt and create actions. Most of this is in the resolve phase
1533 # already, but we can't handle .hgsubstate in filemerge or
1533 # already, but we can't handle .hgsubstate in filemerge or
1534 # subrepo.submerge yet so we have to keep prompting for it.
1534 # subrepo.submerge yet so we have to keep prompting for it.
1535 if '.hgsubstate' in actionbyfile:
1535 if '.hgsubstate' in actionbyfile:
1536 f = '.hgsubstate'
1536 f = '.hgsubstate'
1537 m, args, msg = actionbyfile[f]
1537 m, args, msg = actionbyfile[f]
1538 if m == 'cd':
1538 if m == 'cd':
1539 if repo.ui.promptchoice(
1539 if repo.ui.promptchoice(
1540 _("local changed %s which remote deleted\n"
1540 _("local changed %s which remote deleted\n"
1541 "use (c)hanged version or (d)elete?"
1541 "use (c)hanged version or (d)elete?"
1542 "$$ &Changed $$ &Delete") % f, 0):
1542 "$$ &Changed $$ &Delete") % f, 0):
1543 actionbyfile[f] = ('r', None, "prompt delete")
1543 actionbyfile[f] = ('r', None, "prompt delete")
1544 elif f in p1:
1544 elif f in p1:
1545 actionbyfile[f] = ('am', None, "prompt keep")
1545 actionbyfile[f] = ('am', None, "prompt keep")
1546 else:
1546 else:
1547 actionbyfile[f] = ('a', None, "prompt keep")
1547 actionbyfile[f] = ('a', None, "prompt keep")
1548 elif m == 'dc':
1548 elif m == 'dc':
1549 f1, f2, fa, move, anc = args
1549 f1, f2, fa, move, anc = args
1550 flags = p2[f2].flags()
1550 flags = p2[f2].flags()
1551 if repo.ui.promptchoice(
1551 if repo.ui.promptchoice(
1552 _("remote changed %s which local deleted\n"
1552 _("remote changed %s which local deleted\n"
1553 "use (c)hanged version or leave (d)eleted?"
1553 "use (c)hanged version or leave (d)eleted?"
1554 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1554 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1555 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1555 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1556 else:
1556 else:
1557 del actionbyfile[f]
1557 del actionbyfile[f]
1558
1558
1559 # Convert to dictionary-of-lists format
1559 # Convert to dictionary-of-lists format
1560 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1560 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1561 for f, (m, args, msg) in actionbyfile.iteritems():
1561 for f, (m, args, msg) in actionbyfile.iteritems():
1562 if m not in actions:
1562 if m not in actions:
1563 actions[m] = []
1563 actions[m] = []
1564 actions[m].append((f, args, msg))
1564 actions[m].append((f, args, msg))
1565
1565
1566 if not util.checkcase(repo.path):
1566 if not util.checkcase(repo.path):
1567 # check collision between files only in p2 for clean update
1567 # check collision between files only in p2 for clean update
1568 if (not branchmerge and
1568 if (not branchmerge and
1569 (force or not wc.dirty(missing=True, branch=False))):
1569 (force or not wc.dirty(missing=True, branch=False))):
1570 _checkcollision(repo, p2.manifest(), None)
1570 _checkcollision(repo, p2.manifest(), None)
1571 else:
1571 else:
1572 _checkcollision(repo, wc.manifest(), actions)
1572 _checkcollision(repo, wc.manifest(), actions)
1573
1573
1574 # divergent renames
1574 # divergent renames
1575 for f, fl in sorted(diverge.iteritems()):
1575 for f, fl in sorted(diverge.iteritems()):
1576 repo.ui.warn(_("note: possible conflict - %s was renamed "
1576 repo.ui.warn(_("note: possible conflict - %s was renamed "
1577 "multiple times to:\n") % f)
1577 "multiple times to:\n") % f)
1578 for nf in fl:
1578 for nf in fl:
1579 repo.ui.warn(" %s\n" % nf)
1579 repo.ui.warn(" %s\n" % nf)
1580
1580
1581 # rename and delete
1581 # rename and delete
1582 for f, fl in sorted(renamedelete.iteritems()):
1582 for f, fl in sorted(renamedelete.iteritems()):
1583 repo.ui.warn(_("note: possible conflict - %s was deleted "
1583 repo.ui.warn(_("note: possible conflict - %s was deleted "
1584 "and renamed to:\n") % f)
1584 "and renamed to:\n") % f)
1585 for nf in fl:
1585 for nf in fl:
1586 repo.ui.warn(" %s\n" % nf)
1586 repo.ui.warn(" %s\n" % nf)
1587
1587
1588 ### apply phase
1588 ### apply phase
1589 if not branchmerge: # just jump to the new rev
1589 if not branchmerge: # just jump to the new rev
1590 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1590 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1591 if not partial:
1591 if not partial:
1592 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1592 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1593 # note that we're in the middle of an update
1593 # note that we're in the middle of an update
1594 repo.vfs.write('updatestate', p2.hex())
1594 repo.vfs.write('updatestate', p2.hex())
1595
1595
1596 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1596 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1597
1597
1598 if not partial:
1598 if not partial:
1599 repo.dirstate.beginparentchange()
1599 repo.dirstate.beginparentchange()
1600 repo.setparents(fp1, fp2)
1600 repo.setparents(fp1, fp2)
1601 recordupdates(repo, actions, branchmerge)
1601 recordupdates(repo, actions, branchmerge)
1602 # update completed, clear state
1602 # update completed, clear state
1603 util.unlink(repo.join('updatestate'))
1603 util.unlink(repo.join('updatestate'))
1604
1604
1605 if not branchmerge:
1605 if not branchmerge:
1606 repo.dirstate.setbranch(p2.branch())
1606 repo.dirstate.setbranch(p2.branch())
1607 repo.dirstate.endparentchange()
1607 repo.dirstate.endparentchange()
1608
1608
1609 if not partial:
1609 if not partial:
1610 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1610 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1611 return stats
1611 return stats
1612
1612
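# A hedged sketch of driving update() directly; 'repo' and 'node' are
# assumed to exist (commands.py normally supplies them). The return
# value is the applyupdates() tuple documented above.
updated, merged, removed, unresolved = update(repo, node,
                                              branchmerge=False, force=False)
if unresolved:
    repo.ui.warn("update left %d files unresolved\n" % unresolved)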
1613 def graft(repo, ctx, pctx, labels, keepparent=False):
1613 def graft(repo, ctx, pctx, labels, keepparent=False):
1614 """Do a graft-like merge.
1614 """Do a graft-like merge.
1615
1615
1616 This is a merge where the merge ancestor is chosen such that one
1616 This is a merge where the merge ancestor is chosen such that one
1617 or more changesets are grafted onto the current changeset. In
1617 or more changesets are grafted onto the current changeset. In
1618 addition to the merge, this fixes up the dirstate to include only
1618 addition to the merge, this fixes up the dirstate to include only
1619 a single parent (if keepparent is False) and tries to duplicate any
1619 a single parent (if keepparent is False) and tries to duplicate any
1620 renames/copies appropriately.
1620 renames/copies appropriately.
1621
1621
1622 ctx - changeset to rebase
1622 ctx - changeset to rebase
1623 pctx - merge base, usually ctx.p1()
1623 pctx - merge base, usually ctx.p1()
1624 labels - merge labels, e.g. ['local', 'graft']
1624 labels - merge labels, e.g. ['local', 'graft']
1625 keepparent - keep second parent if any
1625 keepparent - keep second parent if any
1626
1626
1627 """
1627 """
1628 # If we're grafting a descendant onto an ancestor, be sure to pass
1628 # If we're grafting a descendant onto an ancestor, be sure to pass
1629 # mergeancestor=True to update. This does two things: 1) allows the merge if
1629 # mergeancestor=True to update. This does two things: 1) allows the merge if
1630 # the destination is the same as the parent of the ctx (so we can use graft
1630 # the destination is the same as the parent of the ctx (so we can use graft
1631 # to copy commits), and 2) informs update that the incoming changes are
1631 # to copy commits), and 2) informs update that the incoming changes are
1632 # newer than the destination so it doesn't prompt about "remote changed foo
1632 # newer than the destination so it doesn't prompt about "remote changed foo
1633 # which local deleted".
1633 # which local deleted".
1634 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1634 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1635
1635
1636 stats = update(repo, ctx.node(), True, True, pctx.node(),
1636 stats = update(repo, ctx.node(), True, True, pctx.node(),
1637 mergeancestor=mergeancestor, labels=labels)
1637 mergeancestor=mergeancestor, labels=labels)
1638
1638
1639 pother = nullid
1639 pother = nullid
1640 parents = ctx.parents()
1640 parents = ctx.parents()
1641 if keepparent and len(parents) == 2 and pctx in parents:
1641 if keepparent and len(parents) == 2 and pctx in parents:
1642 parents.remove(pctx)
1642 parents.remove(pctx)
1643 pother = parents[0].node()
1643 pother = parents[0].node()
1644
1644
1645 repo.dirstate.beginparentchange()
1645 repo.dirstate.beginparentchange()
1646 repo.setparents(repo['.'].node(), pother)
1646 repo.setparents(repo['.'].node(), pother)
1647 repo.dirstate.write(repo.currenttransaction())
1647 repo.dirstate.write(repo.currenttransaction())
1648 # fix up dirstate for copies and renames
1648 # fix up dirstate for copies and renames
1649 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1649 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1650 repo.dirstate.endparentchange()
1650 repo.dirstate.endparentchange()
1651 return stats
1651 return stats
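# A minimal sketch of calling graft() the way the graft command does:
# the merge base is usually ctx.p1() and the labels match the docstring
# ('repo' and 'ctx' are assumed to exist).
stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])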
@@ -1,155 +1,155 b''
1 # peer.py - repository base classes for mercurial
1 # peer.py - repository base classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 from .i18n import _
11 from .i18n import _
12 from . import (
12 from . import (
13 error,
13 error,
14 util,
14 util,
15 )
15 )
16
16
17 # abstract batching support
17 # abstract batching support
18
18
19 class future(object):
19 class future(object):
20 '''placeholder for a value to be set later'''
20 '''placeholder for a value to be set later'''
21 def set(self, value):
21 def set(self, value):
22 if util.safehasattr(self, 'value'):
22 if util.safehasattr(self, 'value'):
23 raise error.RepoError("future is already set")
23 raise error.RepoError("future is already set")
24 self.value = value
24 self.value = value
25
25
26 class batcher(object):
26 class batcher(object):
27 '''base class for batches of commands submittable in a single request
27 '''base class for batches of commands submittable in a single request
28
28
29 All methods invoked on instances of this class are simply queued and
29 All methods invoked on instances of this class are simply queued and
30 return a future for the result. Once you call submit(), all the queued
30 return a future for the result. Once you call submit(), all the queued
31 calls are performed and the results set in their respective futures.
31 calls are performed and the results set in their respective futures.
32 '''
32 '''
33 def __init__(self):
33 def __init__(self):
34 self.calls = []
34 self.calls = []
35 def __getattr__(self, name):
35 def __getattr__(self, name):
36 def call(*args, **opts):
36 def call(*args, **opts):
37 resref = future()
37 resref = future()
38 self.calls.append((name, args, opts, resref,))
38 self.calls.append((name, args, opts, resref,))
39 return resref
39 return resref
40 return call
40 return call
41 def submit(self):
41 def submit(self):
42 raise NotImplementedError()
42 raise NotImplementedError()
43
43
44 class iterbatcher(batcher):
44 class iterbatcher(batcher):
45
45
46 def submit(self):
46 def submit(self):
47 raise NotImplementedError()
47 raise NotImplementedError()
48
48
49 def results(self):
49 def results(self):
50 raise NotImplementedError()
50 raise NotImplementedError()
51
51
52 class localbatch(batcher):
52 class localbatch(batcher):
53 '''performs the queued calls directly'''
53 '''performs the queued calls directly'''
54 def __init__(self, local):
54 def __init__(self, local):
55 batcher.__init__(self)
55 batcher.__init__(self)
56 self.local = local
56 self.local = local
57 def submit(self):
57 def submit(self):
58 for name, args, opts, resref in self.calls:
58 for name, args, opts, resref in self.calls:
59 resref.set(getattr(self.local, name)(*args, **opts))
59 resref.set(getattr(self.local, name)(*args, **opts))
60
60
61 class localiterbatcher(iterbatcher):
61 class localiterbatcher(iterbatcher):
62 def __init__(self, local):
62 def __init__(self, local):
63 super(localiterbatcher, self).__init__()
63 super(localiterbatcher, self).__init__()
64 self.local = local
64 self.local = local
65
65
66 def submit(self):
66 def submit(self):
67 # submit for a local iter batcher is a noop
67 # submit for a local iter batcher is a noop
68 pass
68 pass
69
69
70 def results(self):
70 def results(self):
71 for name, args, opts, resref in self.calls:
71 for name, args, opts, resref in self.calls:
72 yield getattr(self.local, name)(*args, **opts)
72 yield getattr(self.local, name)(*args, **opts)
73
73
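# Sketch of the batching pattern: calls on a localbatch are queued and
# return futures; submit() performs them. 'Echo' is a toy stand-in peer.
class Echo(object):
    def greet(self, name):
        return 'hello ' + name

b = localbatch(Echo())
fut = b.greet('world')  # queued: returns a future, not the result
b.submit()              # runs the queued call and fills the future
assert fut.value == 'hello world'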
74 def batchable(f):
74 def batchable(f):
75 '''decorator for batchable methods
75 '''decorator for batchable methods
76
76
77 Such methods must implement a coroutine as follows:
77 Such methods must implement a coroutine as follows:
78
78
79 @batchable
79 @batchable
80 def sample(self, one, two=None):
80 def sample(self, one, two=None):
81 # Handle locally computable results first:
81 # Handle locally computable results first:
82 if not one:
82 if not one:
83 yield "a local result", None
83 yield "a local result", None
84 # Build list of encoded arguments suitable for your wire protocol:
84 # Build list of encoded arguments suitable for your wire protocol:
85 encargs = [('one', encode(one),), ('two', encode(two),)]
85 encargs = [('one', encode(one),), ('two', encode(two),)]
86 # Create future for injection of encoded result:
86 # Create future for injection of encoded result:
87 encresref = future()
87 encresref = future()
88 # Return encoded arguments and future:
88 # Return encoded arguments and future:
89 yield encargs, encresref
89 yield encargs, encresref
90 # Assuming the future to be filled with the result from the batched
90 # Assuming the future to be filled with the result from the batched
91 # request now. Decode it:
91 # request now. Decode it:
92 yield decode(encresref.value)
92 yield decode(encresref.value)
93
93
94 The decorator returns a function which wraps this coroutine as a plain
94 The decorator returns a function which wraps this coroutine as a plain
95 method, but adds the original method as an attribute called "batchable",
95 method, but adds the original method as an attribute called "batchable",
96 which is used by remotebatch to split the call into separate encoding and
96 which is used by remotebatch to split the call into separate encoding and
97 decoding phases.
97 decoding phases.
98 '''
98 '''
99 def plain(*args, **opts):
99 def plain(*args, **opts):
100 batchable = f(*args, **opts)
100 batchable = f(*args, **opts)
101 encargsorres, encresref = batchable.next()
101 encargsorres, encresref = next(batchable)
102 if not encresref:
102 if not encresref:
103 return encargsorres # a local result in this case
103 return encargsorres # a local result in this case
104 self = args[0]
104 self = args[0]
105 encresref.set(self._submitone(f.func_name, encargsorres))
105 encresref.set(self._submitone(f.func_name, encargsorres))
106 return batchable.next()
106 return next(batchable)
107 setattr(plain, 'batchable', f)
107 setattr(plain, 'batchable', f)
108 return plain
108 return plain
109
109
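# The point of this changeset: Python 3 drops the .next() method on
# generators, while the next() builtin (available since Python 2.6)
# works on both lines, so the coroutine above is driven portably.
def gen():
    yield 1
    yield 2

g = gen()
assert next(g) == 1  # portable across Python 2 and 3
# g.next() would raise AttributeError on Python 3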
110 class peerrepository(object):
110 class peerrepository(object):
111
111
112 def batch(self):
112 def batch(self):
113 return localbatch(self)
113 return localbatch(self)
114
114
115 def iterbatch(self):
115 def iterbatch(self):
116 """Batch requests but allow iterating over the results.
116 """Batch requests but allow iterating over the results.
117
117
118 This is to allow interleaving responses with things like
118 This is to allow interleaving responses with things like
119 progress updates for clients.
119 progress updates for clients.
120 """
120 """
121 return localiterbatcher(self)
121 return localiterbatcher(self)
122
122
123 def capable(self, name):
123 def capable(self, name):
124 '''tell whether repo supports named capability.
124 '''tell whether repo supports named capability.
125 return False if not supported.
125 return False if not supported.
126 if boolean capability, return True.
126 if boolean capability, return True.
127 if string capability, return string.'''
127 if string capability, return string.'''
128 caps = self._capabilities()
128 caps = self._capabilities()
129 if name in caps:
129 if name in caps:
130 return True
130 return True
131 name_eq = name + '='
131 name_eq = name + '='
132 for cap in caps:
132 for cap in caps:
133 if cap.startswith(name_eq):
133 if cap.startswith(name_eq):
134 return cap[len(name_eq):]
134 return cap[len(name_eq):]
135 return False
135 return False
136
136
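# A standalone restatement of the lookup rule in capable() above, using
# toy capability strings:
def _capable(caps, name):
    if name in caps:
        return True
    prefix = name + '='
    for cap in caps:
        if cap.startswith(prefix):
            return cap[len(prefix):]
    return False

assert _capable(['lookup'], 'lookup') is True
assert _capable(['bundle2=HG20'], 'bundle2') == 'HG20'
assert _capable([], 'push') is False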
137 def requirecap(self, name, purpose):
137 def requirecap(self, name, purpose):
138 '''raise an exception if the given capability is not present'''
138 '''raise an exception if the given capability is not present'''
139 if not self.capable(name):
139 if not self.capable(name):
140 raise error.CapabilityError(
140 raise error.CapabilityError(
141 _('cannot %s; remote repository does not '
141 _('cannot %s; remote repository does not '
142 'support the %r capability') % (purpose, name))
142 'support the %r capability') % (purpose, name))
143
143
144 def local(self):
144 def local(self):
145 '''return peer as a localrepo, or None'''
145 '''return peer as a localrepo, or None'''
146 return None
146 return None
147
147
148 def peer(self):
148 def peer(self):
149 return self
149 return self
150
150
151 def canpush(self):
151 def canpush(self):
152 return True
152 return True
153
153
154 def close(self):
154 def close(self):
155 pass
155 pass
@@ -1,3396 +1,3396 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 parser,
22 parser,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 registrar,
25 registrar,
26 repoview,
26 repoview,
27 util,
27 util,
28 )
28 )
29
29
30 def _revancestors(repo, revs, followfirst):
30 def _revancestors(repo, revs, followfirst):
31 """Like revlog.ancestors(), but supports followfirst."""
31 """Like revlog.ancestors(), but supports followfirst."""
32 if followfirst:
32 if followfirst:
33 cut = 1
33 cut = 1
34 else:
34 else:
35 cut = None
35 cut = None
36 cl = repo.changelog
36 cl = repo.changelog
37
37
38 def iterate():
38 def iterate():
39 revs.sort(reverse=True)
39 revs.sort(reverse=True)
40 irevs = iter(revs)
40 irevs = iter(revs)
41 h = []
41 h = []
42
42
43 inputrev = next(irevs, None)
43 inputrev = next(irevs, None)
44 if inputrev is not None:
44 if inputrev is not None:
45 heapq.heappush(h, -inputrev)
45 heapq.heappush(h, -inputrev)
46
46
47 seen = set()
47 seen = set()
48 while h:
48 while h:
49 current = -heapq.heappop(h)
49 current = -heapq.heappop(h)
50 if current == inputrev:
50 if current == inputrev:
51 inputrev = next(irevs, None)
51 inputrev = next(irevs, None)
52 if inputrev is not None:
52 if inputrev is not None:
53 heapq.heappush(h, -inputrev)
53 heapq.heappush(h, -inputrev)
54 if current not in seen:
54 if current not in seen:
55 seen.add(current)
55 seen.add(current)
56 yield current
56 yield current
57 for parent in cl.parentrevs(current)[:cut]:
57 for parent in cl.parentrevs(current)[:cut]:
58 if parent != node.nullrev:
58 if parent != node.nullrev:
59 heapq.heappush(h, -parent)
59 heapq.heappush(h, -parent)
60
60
61 return generatorset(iterate(), iterasc=False)
61 return generatorset(iterate(), iterasc=False)
62
62
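# _revancestors leans on the two-argument form of next(): when the
# iterator is exhausted it returns the default instead of raising
# StopIteration, which keeps the heap-draining loop above branch-free.
it = iter([1])
assert next(it, None) == 1
assert next(it, None) is None  # exhausted: default value, no exception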
63 def _revdescendants(repo, revs, followfirst):
63 def _revdescendants(repo, revs, followfirst):
64 """Like revlog.descendants() but supports followfirst."""
64 """Like revlog.descendants() but supports followfirst."""
65 if followfirst:
65 if followfirst:
66 cut = 1
66 cut = 1
67 else:
67 else:
68 cut = None
68 cut = None
69
69
70 def iterate():
70 def iterate():
71 cl = repo.changelog
71 cl = repo.changelog
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 # smartset (and if it is not, it should.)
73 # smartset (and if it is not, it should.)
74 first = min(revs)
74 first = min(revs)
75 nullrev = node.nullrev
75 nullrev = node.nullrev
76 if first == nullrev:
76 if first == nullrev:
77 # Are there nodes with a null first parent and a non-null
77 # Are there nodes with a null first parent and a non-null
78 # second one? Maybe. Do we care? Probably not.
78 # second one? Maybe. Do we care? Probably not.
79 for i in cl:
79 for i in cl:
80 yield i
80 yield i
81 else:
81 else:
82 seen = set(revs)
82 seen = set(revs)
83 for i in cl.revs(first + 1):
83 for i in cl.revs(first + 1):
84 for x in cl.parentrevs(i)[:cut]:
84 for x in cl.parentrevs(i)[:cut]:
85 if x != nullrev and x in seen:
85 if x != nullrev and x in seen:
86 seen.add(i)
86 seen.add(i)
87 yield i
87 yield i
88 break
88 break
89
89
90 return generatorset(iterate(), iterasc=True)
90 return generatorset(iterate(), iterasc=True)
91
91
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 """return (heads(::<roots> and ::<heads>))
93 """return (heads(::<roots> and ::<heads>))
94
94
95 If includepath is True, return (<roots>::<heads>)."""
95 If includepath is True, return (<roots>::<heads>)."""
96 if not roots:
96 if not roots:
97 return []
97 return []
98 parentrevs = repo.changelog.parentrevs
98 parentrevs = repo.changelog.parentrevs
99 roots = set(roots)
99 roots = set(roots)
100 visit = list(heads)
100 visit = list(heads)
101 reachable = set()
101 reachable = set()
102 seen = {}
102 seen = {}
103 # prefetch all the things! (because python is slow)
103 # prefetch all the things! (because python is slow)
104 reached = reachable.add
104 reached = reachable.add
105 dovisit = visit.append
105 dovisit = visit.append
106 nextvisit = visit.pop
106 nextvisit = visit.pop
107 # open-code the post-order traversal due to the tiny size of
107 # open-code the post-order traversal due to the tiny size of
108 # sys.getrecursionlimit()
108 # sys.getrecursionlimit()
109 while visit:
109 while visit:
110 rev = nextvisit()
110 rev = nextvisit()
111 if rev in roots:
111 if rev in roots:
112 reached(rev)
112 reached(rev)
113 if not includepath:
113 if not includepath:
114 continue
114 continue
115 parents = parentrevs(rev)
115 parents = parentrevs(rev)
116 seen[rev] = parents
116 seen[rev] = parents
117 for parent in parents:
117 for parent in parents:
118 if parent >= minroot and parent not in seen:
118 if parent >= minroot and parent not in seen:
119 dovisit(parent)
119 dovisit(parent)
120 if not reachable:
120 if not reachable:
121 return baseset()
121 return baseset()
122 if not includepath:
122 if not includepath:
123 return reachable
123 return reachable
124 for rev in sorted(seen):
124 for rev in sorted(seen):
125 for parent in seen[rev]:
125 for parent in seen[rev]:
126 if parent in reachable:
126 if parent in reachable:
127 reached(rev)
127 reached(rev)
128 return reachable
128 return reachable
129
129
130 def reachableroots(repo, roots, heads, includepath=False):
130 def reachableroots(repo, roots, heads, includepath=False):
131 """return (heads(::<roots> and ::<heads>))
131 """return (heads(::<roots> and ::<heads>))
132
132
133 If includepath is True, return (<roots>::<heads>)."""
133 If includepath is True, return (<roots>::<heads>)."""
134 if not roots:
134 if not roots:
135 return baseset()
135 return baseset()
136 minroot = roots.min()
136 minroot = roots.min()
137 roots = list(roots)
137 roots = list(roots)
138 heads = list(heads)
138 heads = list(heads)
139 try:
139 try:
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 except AttributeError:
141 except AttributeError:
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 revs = baseset(revs)
143 revs = baseset(revs)
144 revs.sort()
144 revs.sort()
145 return revs
145 return revs
146
146
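# The try/except AttributeError above is a feature probe: use the
# C-level changelog.reachableroots when available, else the pure-Python
# version. The same pattern in miniature (note that an AttributeError
# raised *inside* the native call would also be caught):
class Native(object):
    def reachableroots(self):
        return 'C implementation'

def roots(cl):
    try:
        return cl.reachableroots()
    except AttributeError:
        return 'pure-Python fallback'

assert roots(Native()) == 'C implementation'
assert roots(object()) == 'pure-Python fallback'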
147 elements = {
147 elements = {
148 # token-type: binding-strength, primary, prefix, infix, suffix
148 # token-type: binding-strength, primary, prefix, infix, suffix
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 "##": (20, None, None, ("_concat", 20), None),
150 "##": (20, None, None, ("_concat", 20), None),
151 "~": (18, None, None, ("ancestor", 18), None),
151 "~": (18, None, None, ("ancestor", 18), None),
152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 ("dagrangepost", 17)),
155 ("dagrangepost", 17)),
156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 ("dagrangepost", 17)),
157 ("dagrangepost", 17)),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 "not": (10, None, ("not", 10), None, None),
159 "not": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
161 "and": (5, None, None, ("and", 5), None),
161 "and": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 "or": (4, None, None, ("or", 4), None),
164 "or": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
168 ",": (2, None, None, ("list", 2), None),
168 ",": (2, None, None, ("list", 2), None),
169 ")": (0, None, None, None, None),
169 ")": (0, None, None, None, None),
170 "symbol": (0, "symbol", None, None, None),
170 "symbol": (0, "symbol", None, None, None),
171 "string": (0, "string", None, None, None),
171 "string": (0, "string", None, None, None),
172 "end": (0, None, None, None, None),
172 "end": (0, None, None, None, None),
173 }
173 }
174
174
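# Reading the elements table: token -> (binding strength, primary,
# prefix, infix, suffix). For example '::' binds at strength 17 and is
# usable in all three positions (::y, x::y, x::):
strength, primary, prefix, infix, suffix = elements['::']
assert strength == 17
assert infix == ('dagrange', 17)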
175 keywords = set(['and', 'or', 'not'])
175 keywords = set(['and', 'or', 'not'])
176
176
177 # default set of valid characters for the initial letter of symbols
177 # default set of valid characters for the initial letter of symbols
178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 if c.isalnum() or c in '._@' or ord(c) > 127)
179 if c.isalnum() or c in '._@' or ord(c) > 127)
180
180
181 # default set of valid characters for non-initial letters of symbols
181 # default set of valid characters for non-initial letters of symbols
182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184
184
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)

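# Example (added for illustration; not part of the original module): with
# no lookup callback, tokenize() turns a simple range expression into
# operator and symbol tokens carrying their string offsets:
#
#   >>> list(tokenize("1:5"))
#   [('symbol', '1', 0), (':', None, 1), ('symbol', '5', 2), ('end', None, 3)]
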
# helpers

def getstring(x, err):
    if x and (x[0] == 'string' or x[0] == 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return list(x[1:])
    return [x]

def getargs(x, min, max, err):
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l

def getargsdict(x, funcname, keys):
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')

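# Example (illustrative, not from the original source): these helpers
# operate on parse-tree tuples, so a two-argument call parsed as
# ('list', ('symbol', 'a', 0), ('symbol', 'b', 2)) satisfies
# getargs(x, 2, 2, err), and getlist() returns its two child nodes.
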
def getset(repo, subset, x):
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)

def _getrevsource(repo, r):
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None

# operator methods

def stringset(repo, subset, x):
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

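# Example (illustrative, not from the original source): for '3:1' the
# endpoints satisfy m > n, so rangeset() builds the descending span
# 3, 2, 1 via spanset(repo, 3, 0) before intersecting with subset.
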
def dagrange(repo, subset, x, y):
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y):
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y):
    return getset(repo, subset, x) - getset(repo, subset, y)

def orset(repo, subset, *xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b

405
405
406 def notset(repo, subset, x):
406 def notset(repo, subset, x):
407 return subset - getset(repo, subset, x)
407 return subset - getset(repo, subset, x)
408
408
409 def listset(repo, subset, *xs):
409 def listset(repo, subset, *xs):
410 raise error.ParseError(_("can't use a list in this context"),
410 raise error.ParseError(_("can't use a list in this context"),
411 hint=_('see hg help "revsets.x or y"'))
411 hint=_('see hg help "revsets.x or y"'))
412
412
413 def keyvaluepair(repo, subset, k, v):
413 def keyvaluepair(repo, subset, k, v):
414 raise error.ParseError(_("can't use a key-value pair in this context"))
414 raise error.ParseError(_("can't use a key-value pair in this context"))
415
415
416 def func(repo, subset, a, b):
416 def func(repo, subset, a, b):
417 if a[0] == 'symbol' and a[1] in symbols:
417 if a[0] == 'symbol' and a[1] in symbols:
418 return symbols[a[1]](repo, subset, b)
418 return symbols[a[1]](repo, subset, b)
419
419
420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
421
421
422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
423 raise error.UnknownIdentifier(a[1], syms)
423 raise error.UnknownIdentifier(a[1], syms)
424
424
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

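# Sketch (added for illustration; the predicate name below is hypothetical
# and not part of the module): registering a function with the decorator
# above adds it to `symbols`, and safe=True also adds it to `safesymbols`.
#
#   @predicate('onlyroot()', safe=True)
#   def onlyroot(repo, subset, x):
#       getargs(x, 0, 0, _("onlyroot takes no arguments"))
#       return subset & baseset([0])
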
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()

def _ancestors(repo, subset, x, followfirst=False):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``        : csets topologically good/bad
    - ``range``                  : csets taking part in the bisection
    - ``pruned``                 : csets that are goods, bads or skipped
    - ``untested``               : csets whose fate is yet unknown
    - ``ignored``                : csets ignored due to DAG topology
    - ``current``                : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms

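# Examples (illustrative; the bookmark names are hypothetical):
#   bookmark()               - all bookmarked revisions
#   bookmark('stable')       - the bookmark literally named 'stable'
#   bookmark('re:^release-') - bookmarks whose names match the regex
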
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))

@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

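# Note (added for clarity): `field` indexes the status tuple returned by
# repo.status(), whose first entries are (modified, added, removed), so
# adds() above passes field=1, and the modifies()/removes() predicates
# defined elsewhere in this module pass 0 and 2 respectively.
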
def _children(repo, narrow, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))

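# Example (illustrative): if revision A was grafted as B and B was later
# rebased as C, destination(A) selects both B and C, because the scan
# above follows C -> B -> A through the recorded source markers.
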
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))

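# Examples (illustrative): extra('rebase_source') selects changesets that
# record any rebase source, while extra('branch', 're:^release-') matches
# the stored branch name against a regex; the keys and values shown are
# only samples of what may appear in a changeset's extra dict.
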
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s

@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching the given
    pattern are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))

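# Example (illustrative): grep(r'issue\d+') searches the regex against
# each changed file name, the committer, and the full commit message of
# every candidate revision, so a message mentioning 'issue1234' matches.
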
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, defaulting
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for
    # every revision would be quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))

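# Illustrative walk-through of _matchfiles() above (hypothetical arguments):
# given "p:src/*.c", "i:src", "x:src/tests" and "d:relpath", the loop parses
# pats=['src/*.c'], inc=['src'], exc=['src/tests'], default='relpath' and
# rev=None, so the matcher is built against the working directory context
# repo[None].
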
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

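# Illustrative usage of keyword() above (hypothetical values): keyword("bug")
# selects revisions whose description, user name, or touched file names
# contain "bug" in any case, so "Bugfix", "debug.py" and "bugs@example.org"
# would all match.
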
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))

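# Worked example of the two loops in limit() above (hypothetical arguments):
# for limit(X, 2, 3), the first xrange(ofs) loop consumes and discards the
# first three members of X, and the second xrange(lim) loop yields the next
# two, keeping only those that are also in the input subset.
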
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))

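# Note on last() above: it is essentially limit() with no offset applied to
# the reversed set, so for an ascending input it collects the n highest
# members, in descending iteration order.
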
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

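# Sketch of the counting scheme in branchpoint() above (hypothetical
# numbers): parentscount is indexed by 'rev - baserev', so with baserev=10
# a parent edge 12 -> 11 increments parentscount[1]; any slot that ends up
# greater than one marks its revision as a branch point.
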
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that matches '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

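# Resolution order in node_() above: a full 40-character hex string is
# looked up directly in the changelog and yields the empty set if unknown;
# anything shorter goes through _partialmatch, which returns the unique
# node carrying that prefix, or None.
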
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results

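# Illustrative usage of only() above (hypothetical revset names):
# only(feature, default) evaluates to ::feature - ::default, i.e. the
# changesets reachable from feature but not from default; with a single
# argument, every other repository head serves as the exclude set.
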
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o

@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps

def _phase(repo, subset, target):
    """helper to select all revs in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # sets are unordered, so enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

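# Design note on _phase() above: when the phase cache already holds
# per-phase revision sets, membership is answered with one set operation;
# otherwise it degrades to filtering the subset with a per-revision
# phase(repo, r) == target check, and the filtered result is deliberately
# left uncached (cache=False), presumably because a revision's phase can
# change while the process runs.
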
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n):
    """``set^0``
      The set.
    ``set^1`` (or ``set^``), ``set^2``
      First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps

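# Illustrative usage of the '^' operator handled by parentspec() above:
# 'tip^' and 'tip^1' select the first parent of tip, 'tip^2' its second
# parent (empty for non-merges), and 'tip^0' tip itself.
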
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)

@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))

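# Illustrative usage of matching() above (hypothetical arguments):
# matching(tip, "author date") selects every changeset whose user and date
# fields both equal tip's; per the fieldorder list, cheap fields such as
# 'user' are evaluated before costly ones such as 'diff'.
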
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

@predicate('sort(set[, [-]key...])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k in reversed(keys):
        if k == 'rev':
            ctxs.sort(key=lambda c: c.rev())
        elif k == '-rev':
            ctxs.sort(key=lambda c: c.rev(), reverse=True)
        elif k == 'branch':
            ctxs.sort(key=lambda c: c.branch())
        elif k == '-branch':
            ctxs.sort(key=lambda c: c.branch(), reverse=True)
        elif k == 'desc':
            ctxs.sort(key=lambda c: c.description())
        elif k == '-desc':
            ctxs.sort(key=lambda c: c.description(), reverse=True)
        elif k in 'user author':
            ctxs.sort(key=lambda c: c.user())
        elif k in '-user -author':
            ctxs.sort(key=lambda c: c.user(), reverse=True)
        elif k == 'date':
            ctxs.sort(key=lambda c: c.date()[0])
        elif k == '-date':
            ctxs.sort(key=lambda c: c.date()[0], reverse=True)
        else:
            raise error.ParseError(_("unknown sort key %r") % k)
    return baseset([c.rev() for c in ctxs])

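# Illustrative usage of sort() above (hypothetical spec): sort(all(),
# "branch -date") orders the repository by branch name ascending and,
# within each branch, by commit date descending; the reversed(keys) loop
# relies on list.sort() being stable, so the first-listed key dominates.
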
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

def _substringmatcher(pattern):
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

1943 @predicate('tag([name])', safe=True)
1943 @predicate('tag([name])', safe=True)
1944 def tag(repo, subset, x):
1944 def tag(repo, subset, x):
1945 """The specified tag by name, or all tagged revisions if no name is given.
1945 """The specified tag by name, or all tagged revisions if no name is given.
1946
1946
1947 If `name` starts with `re:`, the remainder of the name is treated as
1947 If `name` starts with `re:`, the remainder of the name is treated as
1948 a regular expression. To match a tag that actually starts with `re:`,
1948 a regular expression. To match a tag that actually starts with `re:`,
1949 use the prefix `literal:`.
1949 use the prefix `literal:`.
1950 """
1950 """
1951 # i18n: "tag" is a keyword
1951 # i18n: "tag" is a keyword
1952 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1952 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1953 cl = repo.changelog
1953 cl = repo.changelog
1954 if args:
1954 if args:
1955 pattern = getstring(args[0],
1955 pattern = getstring(args[0],
1956 # i18n: "tag" is a keyword
1956 # i18n: "tag" is a keyword
1957 _('the argument to tag must be a string'))
1957 _('the argument to tag must be a string'))
1958 kind, pattern, matcher = util.stringmatcher(pattern)
1958 kind, pattern, matcher = util.stringmatcher(pattern)
1959 if kind == 'literal':
1959 if kind == 'literal':
1960 # avoid resolving all tags
1960 # avoid resolving all tags
1961 tn = repo._tagscache.tags.get(pattern, None)
1961 tn = repo._tagscache.tags.get(pattern, None)
1962 if tn is None:
1962 if tn is None:
1963 raise error.RepoLookupError(_("tag '%s' does not exist")
1963 raise error.RepoLookupError(_("tag '%s' does not exist")
1964 % pattern)
1964 % pattern)
1965 s = set([repo[tn].rev()])
1965 s = set([repo[tn].rev()])
1966 else:
1966 else:
1967 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1967 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1968 else:
1968 else:
1969 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1969 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1970 return subset & s
1970 return subset & s
1971
1971
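The literal branch above is a deliberate fast path: a single lookup in the tags cache instead of resolving the full repo.tagslist(). A standalone analogue with a hypothetical tag table:

import re

tags = {'tip': 'n9', 'v1.0': 'n3', 'v1.1': 'n7'}  # hypothetical name -> node

def tagrevs(pattern=None):
    if pattern is None:
        # all tags except the implicit 'tip'
        return sorted(t for t in tags if t != 'tip')
    if pattern.startswith('re:'):
        rx = re.compile(pattern[3:])
        return sorted(t for t in tags if rx.search(t))
    # literal fast path: one dict lookup, no full scan
    if pattern not in tags:
        raise LookupError("tag '%s' does not exist" % pattern)
    return [pattern]

print(tagrevs('v1.0'), tagrevs('re:^v1'), tagrevs())
# ['v1.0'] ['v1.0', 'v1.1'] ['v1.0', 'v1.1']
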
1972 @predicate('tagged', safe=True)
1972 @predicate('tagged', safe=True)
1973 def tagged(repo, subset, x):
1973 def tagged(repo, subset, x):
1974 return tag(repo, subset, x)
1974 return tag(repo, subset, x)
1975
1975
1976 @predicate('unstable()', safe=True)
1976 @predicate('unstable()', safe=True)
1977 def unstable(repo, subset, x):
1977 def unstable(repo, subset, x):
1978 """Non-obsolete changesets with obsolete ancestors.
1978 """Non-obsolete changesets with obsolete ancestors.
1979 """
1979 """
1980 # i18n: "unstable" is a keyword
1980 # i18n: "unstable" is a keyword
1981 getargs(x, 0, 0, _("unstable takes no arguments"))
1981 getargs(x, 0, 0, _("unstable takes no arguments"))
1982 unstables = obsmod.getrevs(repo, 'unstable')
1982 unstables = obsmod.getrevs(repo, 'unstable')
1983 return subset & unstables
1983 return subset & unstables
1984
1984
1985
1985
1986 @predicate('user(string)', safe=True)
1986 @predicate('user(string)', safe=True)
1987 def user(repo, subset, x):
1987 def user(repo, subset, x):
1988 """User name contains string. The match is case-insensitive.
1988 """User name contains string. The match is case-insensitive.
1989
1989
1990 If `string` starts with `re:`, the remainder of the string is treated as
1990 If `string` starts with `re:`, the remainder of the string is treated as
1991 a regular expression. To match a user that actually contains `re:`, use
1991 a regular expression. To match a user that actually contains `re:`, use
1992 the prefix `literal:`.
1992 the prefix `literal:`.
1993 """
1993 """
1994 return author(repo, subset, x)
1994 return author(repo, subset, x)
1995
1995
1996 # experimental
1996 # experimental
1997 @predicate('wdir', safe=True)
1997 @predicate('wdir', safe=True)
1998 def wdir(repo, subset, x):
1998 def wdir(repo, subset, x):
1999 # i18n: "wdir" is a keyword
1999 # i18n: "wdir" is a keyword
2000 getargs(x, 0, 0, _("wdir takes no arguments"))
2000 getargs(x, 0, 0, _("wdir takes no arguments"))
2001 if node.wdirrev in subset or isinstance(subset, fullreposet):
2001 if node.wdirrev in subset or isinstance(subset, fullreposet):
2002 return baseset([node.wdirrev])
2002 return baseset([node.wdirrev])
2003 return baseset()
2003 return baseset()
2004
2004
2005 # for internal use
2005 # for internal use
2006 @predicate('_list', safe=True)
2006 @predicate('_list', safe=True)
2007 def _list(repo, subset, x):
2007 def _list(repo, subset, x):
2008 s = getstring(x, "internal error")
2008 s = getstring(x, "internal error")
2009 if not s:
2009 if not s:
2010 return baseset()
2010 return baseset()
2011 # remove duplicates here. it's difficult for caller to deduplicate sets
2011 # remove duplicates here. it's difficult for the caller to deduplicate sets
2011 # remove duplicates here. it's difficult for the caller to deduplicate sets
2011 # remove duplicates here. it's difficult for caller to deduplicate sets
2012 # because different symbols can point to the same rev.
2012 # because different symbols can point to the same rev.
2013 cl = repo.changelog
2013 cl = repo.changelog
2014 ls = []
2014 ls = []
2015 seen = set()
2015 seen = set()
2016 for t in s.split('\0'):
2016 for t in s.split('\0'):
2017 try:
2017 try:
2018 # fast path for integer revision
2018 # fast path for integer revision
2019 r = int(t)
2019 r = int(t)
2020 if str(r) != t or r not in cl:
2020 if str(r) != t or r not in cl:
2021 raise ValueError
2021 raise ValueError
2022 revs = [r]
2022 revs = [r]
2023 except ValueError:
2023 except ValueError:
2024 revs = stringset(repo, subset, t)
2024 revs = stringset(repo, subset, t)
2025
2025
2026 for r in revs:
2026 for r in revs:
2027 if r in seen:
2027 if r in seen:
2028 continue
2028 continue
2029 if (r in subset
2029 if (r in subset
2030 or r == node.nullrev and isinstance(subset, fullreposet)):
2030 or r == node.nullrev and isinstance(subset, fullreposet)):
2031 ls.append(r)
2031 ls.append(r)
2032 seen.add(r)
2032 seen.add(r)
2033 return baseset(ls)
2033 return baseset(ls)
2034
2034
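The seen-set idiom in _list drops duplicates while preserving input order, which a plain set() round-trip would not. Standalone:

def dedup(revs):
    seen = set()
    out = []
    for r in revs:
        if r not in seen:  # first occurrence wins; order preserved
            seen.add(r)
            out.append(r)
    return out

print(dedup([3, 1, 3, 2, 1]))  # [3, 1, 2]
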
2035 # for internal use
2035 # for internal use
2036 @predicate('_intlist', safe=True)
2036 @predicate('_intlist', safe=True)
2037 def _intlist(repo, subset, x):
2037 def _intlist(repo, subset, x):
2038 s = getstring(x, "internal error")
2038 s = getstring(x, "internal error")
2039 if not s:
2039 if not s:
2040 return baseset()
2040 return baseset()
2041 ls = [int(r) for r in s.split('\0')]
2041 ls = [int(r) for r in s.split('\0')]
2042 s = subset
2042 s = subset
2043 return baseset([r for r in ls if r in s])
2043 return baseset([r for r in ls if r in s])
2044
2044
2045 # for internal use
2045 # for internal use
2046 @predicate('_hexlist', safe=True)
2046 @predicate('_hexlist', safe=True)
2047 def _hexlist(repo, subset, x):
2047 def _hexlist(repo, subset, x):
2048 s = getstring(x, "internal error")
2048 s = getstring(x, "internal error")
2049 if not s:
2049 if not s:
2050 return baseset()
2050 return baseset()
2051 cl = repo.changelog
2051 cl = repo.changelog
2052 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2052 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2053 s = subset
2053 s = subset
2054 return baseset([r for r in ls if r in s])
2054 return baseset([r for r in ls if r in s])
2055
2055
2056 methods = {
2056 methods = {
2057 "range": rangeset,
2057 "range": rangeset,
2058 "dagrange": dagrange,
2058 "dagrange": dagrange,
2059 "string": stringset,
2059 "string": stringset,
2060 "symbol": stringset,
2060 "symbol": stringset,
2061 "and": andset,
2061 "and": andset,
2062 "or": orset,
2062 "or": orset,
2063 "not": notset,
2063 "not": notset,
2064 "difference": differenceset,
2064 "difference": differenceset,
2065 "list": listset,
2065 "list": listset,
2066 "keyvalue": keyvaluepair,
2066 "keyvalue": keyvaluepair,
2067 "func": func,
2067 "func": func,
2068 "ancestor": ancestorspec,
2068 "ancestor": ancestorspec,
2069 "parent": parentspec,
2069 "parent": parentspec,
2070 "parentpost": p1,
2070 "parentpost": p1,
2071 }
2071 }
2072
2072
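The methods table is the evaluator's dispatch point: the first element of each parse-tree tuple names the handler, and the remaining elements become its operands. A toy version of the pattern (the real handlers take (repo, subset, x) and richer operand shapes):

def evaluate(subset, tree):
    # first tuple element picks the handler, the rest are operands
    return toymethods[tree[0]](subset, *tree[1:])

toymethods = {
    'symbol': lambda subset, name: {int(name)} & set(subset),
    'or': lambda subset, a, b: evaluate(subset, a) | evaluate(subset, b),
}

tree = ('or', ('symbol', '2'), ('symbol', '5'))
print(sorted(evaluate(range(10), tree)))  # [2, 5]
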
2073 def _matchonly(revs, bases):
2073 def _matchonly(revs, bases):
2074 """
2074 """
2075 >>> f = lambda *args: _matchonly(*map(parse, args))
2075 >>> f = lambda *args: _matchonly(*map(parse, args))
2076 >>> f('ancestors(A)', 'not ancestors(B)')
2076 >>> f('ancestors(A)', 'not ancestors(B)')
2077 ('list', ('symbol', 'A'), ('symbol', 'B'))
2077 ('list', ('symbol', 'A'), ('symbol', 'B'))
2078 """
2078 """
2079 if (revs is not None
2079 if (revs is not None
2080 and revs[0] == 'func'
2080 and revs[0] == 'func'
2081 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2081 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2082 and bases is not None
2082 and bases is not None
2083 and bases[0] == 'not'
2083 and bases[0] == 'not'
2084 and bases[1][0] == 'func'
2084 and bases[1][0] == 'func'
2085 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2085 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2086 return ('list', revs[2], bases[1][2])
2086 return ('list', revs[2], bases[1][2])
2087
2087
2088 def _optimize(x, small):
2088 def _optimize(x, small):
2089 if x is None:
2089 if x is None:
2090 return 0, x
2090 return 0, x
2091
2091
2092 smallbonus = 1
2092 smallbonus = 1
2093 if small:
2093 if small:
2094 smallbonus = .5
2094 smallbonus = .5
2095
2095
2096 op = x[0]
2096 op = x[0]
2097 if op == 'minus':
2097 if op == 'minus':
2098 return _optimize(('and', x[1], ('not', x[2])), small)
2098 return _optimize(('and', x[1], ('not', x[2])), small)
2099 elif op == 'only':
2099 elif op == 'only':
2100 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2100 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2101 return _optimize(t, small)
2101 return _optimize(t, small)
2102 elif op == 'onlypost':
2102 elif op == 'onlypost':
2103 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2103 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2104 elif op == 'dagrangepre':
2104 elif op == 'dagrangepre':
2105 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2105 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2106 elif op == 'dagrangepost':
2106 elif op == 'dagrangepost':
2107 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2107 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2108 elif op == 'rangeall':
2108 elif op == 'rangeall':
2109 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2109 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2110 elif op == 'rangepre':
2110 elif op == 'rangepre':
2111 return _optimize(('range', ('string', '0'), x[1]), small)
2111 return _optimize(('range', ('string', '0'), x[1]), small)
2112 elif op == 'rangepost':
2112 elif op == 'rangepost':
2113 return _optimize(('range', x[1], ('string', 'tip')), small)
2113 return _optimize(('range', x[1], ('string', 'tip')), small)
2114 elif op == 'negate':
2114 elif op == 'negate':
2115 s = getstring(x[1], _("can't negate that"))
2115 s = getstring(x[1], _("can't negate that"))
2116 return _optimize(('string', '-' + s), small)
2116 return _optimize(('string', '-' + s), small)
2117 elif op in 'string symbol negate':
2117 elif op in 'string symbol negate':
2118 return smallbonus, x # single revisions are small
2118 return smallbonus, x # single revisions are small
2119 elif op == 'and':
2119 elif op == 'and':
2120 wa, ta = _optimize(x[1], True)
2120 wa, ta = _optimize(x[1], True)
2121 wb, tb = _optimize(x[2], True)
2121 wb, tb = _optimize(x[2], True)
2122 w = min(wa, wb)
2122 w = min(wa, wb)
2123
2123
2124 # (::x and not ::y)/(not ::y and ::x) have a fast path
2124 # (::x and not ::y)/(not ::y and ::x) have a fast path
2125 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2125 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2126 if tm:
2126 if tm:
2127 return w, ('func', ('symbol', 'only'), tm)
2127 return w, ('func', ('symbol', 'only'), tm)
2128
2128
2129 if tb is not None and tb[0] == 'not':
2129 if tb is not None and tb[0] == 'not':
2130 return wa, ('difference', ta, tb[1])
2130 return wa, ('difference', ta, tb[1])
2131
2131
2132 if wa > wb:
2132 if wa > wb:
2133 return w, (op, tb, ta)
2133 return w, (op, tb, ta)
2134 return w, (op, ta, tb)
2134 return w, (op, ta, tb)
2135 elif op == 'or':
2135 elif op == 'or':
2136 # fast path for machine-generated expressions, which are likely to have
2136 # fast path for machine-generated expressions, which are likely to have
2137 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2137 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2138 ws, ts, ss = [], [], []
2138 ws, ts, ss = [], [], []
2139 def flushss():
2139 def flushss():
2140 if not ss:
2140 if not ss:
2141 return
2141 return
2142 if len(ss) == 1:
2142 if len(ss) == 1:
2143 w, t = ss[0]
2143 w, t = ss[0]
2144 else:
2144 else:
2145 s = '\0'.join(t[1] for w, t in ss)
2145 s = '\0'.join(t[1] for w, t in ss)
2146 y = ('func', ('symbol', '_list'), ('string', s))
2146 y = ('func', ('symbol', '_list'), ('string', s))
2147 w, t = _optimize(y, False)
2147 w, t = _optimize(y, False)
2148 ws.append(w)
2148 ws.append(w)
2149 ts.append(t)
2149 ts.append(t)
2150 del ss[:]
2150 del ss[:]
2151 for y in x[1:]:
2151 for y in x[1:]:
2152 w, t = _optimize(y, False)
2152 w, t = _optimize(y, False)
2153 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2153 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2154 ss.append((w, t))
2154 ss.append((w, t))
2155 continue
2155 continue
2156 flushss()
2156 flushss()
2157 ws.append(w)
2157 ws.append(w)
2158 ts.append(t)
2158 ts.append(t)
2159 flushss()
2159 flushss()
2160 if len(ts) == 1:
2160 if len(ts) == 1:
2161 return ws[0], ts[0] # 'or' operation is fully optimized out
2161 return ws[0], ts[0] # 'or' operation is fully optimized out
2162 # we can't reorder trees by weight because it would change the order.
2162 # we can't reorder trees by weight because it would change the order.
2163 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2163 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2164 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2164 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2165 return max(ws), (op,) + tuple(ts)
2165 return max(ws), (op,) + tuple(ts)
2166 elif op == 'not':
2166 elif op == 'not':
2167 # Optimize not public() to _notpublic() because we have a fast version
2167 # Optimize not public() to _notpublic() because we have a fast version
2168 if x[1] == ('func', ('symbol', 'public'), None):
2168 if x[1] == ('func', ('symbol', 'public'), None):
2169 newsym = ('func', ('symbol', '_notpublic'), None)
2169 newsym = ('func', ('symbol', '_notpublic'), None)
2170 o = _optimize(newsym, not small)
2170 o = _optimize(newsym, not small)
2171 return o[0], o[1]
2171 return o[0], o[1]
2172 else:
2172 else:
2173 o = _optimize(x[1], not small)
2173 o = _optimize(x[1], not small)
2174 return o[0], (op, o[1])
2174 return o[0], (op, o[1])
2175 elif op == 'parentpost':
2175 elif op == 'parentpost':
2176 o = _optimize(x[1], small)
2176 o = _optimize(x[1], small)
2177 return o[0], (op, o[1])
2177 return o[0], (op, o[1])
2178 elif op == 'group':
2178 elif op == 'group':
2179 return _optimize(x[1], small)
2179 return _optimize(x[1], small)
2180 elif op in 'dagrange range parent ancestorspec':
2180 elif op in 'dagrange range parent ancestorspec':
2181 if op == 'parent':
2181 if op == 'parent':
2182 # x^:y means (x^) : y, not x ^ (:y)
2182 # x^:y means (x^) : y, not x ^ (:y)
2183 post = ('parentpost', x[1])
2183 post = ('parentpost', x[1])
2184 if x[2][0] == 'dagrangepre':
2184 if x[2][0] == 'dagrangepre':
2185 return _optimize(('dagrange', post, x[2][1]), small)
2185 return _optimize(('dagrange', post, x[2][1]), small)
2186 elif x[2][0] == 'rangepre':
2186 elif x[2][0] == 'rangepre':
2187 return _optimize(('range', post, x[2][1]), small)
2187 return _optimize(('range', post, x[2][1]), small)
2188
2188
2189 wa, ta = _optimize(x[1], small)
2189 wa, ta = _optimize(x[1], small)
2190 wb, tb = _optimize(x[2], small)
2190 wb, tb = _optimize(x[2], small)
2191 return wa + wb, (op, ta, tb)
2191 return wa + wb, (op, ta, tb)
2192 elif op == 'list':
2192 elif op == 'list':
2193 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2193 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2194 return sum(ws), (op,) + ts
2194 return sum(ws), (op,) + ts
2195 elif op == 'func':
2195 elif op == 'func':
2196 f = getstring(x[1], _("not a symbol"))
2196 f = getstring(x[1], _("not a symbol"))
2197 wa, ta = _optimize(x[2], small)
2197 wa, ta = _optimize(x[2], small)
2198 if f in ("author branch closed date desc file grep keyword "
2198 if f in ("author branch closed date desc file grep keyword "
2199 "outgoing user"):
2199 "outgoing user"):
2200 w = 10 # slow
2200 w = 10 # slow
2201 elif f in "modifies adds removes":
2201 elif f in "modifies adds removes":
2202 w = 30 # slower
2202 w = 30 # slower
2203 elif f == "contains":
2203 elif f == "contains":
2204 w = 100 # very slow
2204 w = 100 # very slow
2205 elif f == "ancestor":
2205 elif f == "ancestor":
2206 w = 1 * smallbonus
2206 w = 1 * smallbonus
2207 elif f in "reverse limit first _intlist":
2207 elif f in "reverse limit first _intlist":
2208 w = 0
2208 w = 0
2209 elif f in "sort":
2209 elif f in "sort":
2210 w = 10 # assume most sorts look at changelog
2210 w = 10 # assume most sorts look at changelog
2211 else:
2211 else:
2212 w = 1
2212 w = 1
2213 return w + wa, (op, x[1], ta)
2213 return w + wa, (op, x[1], ta)
2214 return 1, x
2214 return 1, x
2215
2215
2216 def optimize(tree):
2216 def optimize(tree):
2217 _weight, newtree = _optimize(tree, small=True)
2217 _weight, newtree = _optimize(tree, small=True)
2218 return newtree
2218 return newtree
2219
2219
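Two rewrites above are worth seeing concretely. The 'or' fast path fuses a run of bare symbols into a single _list() node, so 'a + b + c' becomes one \0-joined lookup instead of chained unions; a sketch of the fused node:

def fuse(symbols):
    # mirrors flushss() above: one _list('a\0b\0c') node for a symbol run
    return ('func', ('symbol', '_list'), ('string', '\0'.join(symbols)))

print(fuse(['a', 'b', 'c']))
# ('func', ('symbol', '_list'), ('string', 'a\x00b\x00c'))

Likewise, the 'and' branch puts the lower-weight operand first, so cheap predicates shrink the subset before expensive ones such as contains() ever run.
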
2220 # the set of valid characters for the initial letter of symbols in
2220 # the set of valid characters for the initial letter of symbols in
2221 # alias declarations and definitions
2221 # alias declarations and definitions
2222 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2222 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2223 if c.isalnum() or c in '._@$' or ord(c) > 127)
2223 if c.isalnum() or c in '._@$' or ord(c) > 127)
2224
2224
2225 def _parsewith(spec, lookup=None, syminitletters=None):
2225 def _parsewith(spec, lookup=None, syminitletters=None):
2226 """Generate a parse tree of given spec with given tokenizing options
2226 """Generate a parse tree of given spec with given tokenizing options
2227
2227
2228 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2228 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2229 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2229 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2230 >>> _parsewith('$1')
2230 >>> _parsewith('$1')
2231 Traceback (most recent call last):
2231 Traceback (most recent call last):
2232 ...
2232 ...
2233 ParseError: ("syntax error in revset '$1'", 0)
2233 ParseError: ("syntax error in revset '$1'", 0)
2234 >>> _parsewith('foo bar')
2234 >>> _parsewith('foo bar')
2235 Traceback (most recent call last):
2235 Traceback (most recent call last):
2236 ...
2236 ...
2237 ParseError: ('invalid token', 4)
2237 ParseError: ('invalid token', 4)
2238 """
2238 """
2239 p = parser.parser(elements)
2239 p = parser.parser(elements)
2240 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2240 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2241 syminitletters=syminitletters))
2241 syminitletters=syminitletters))
2242 if pos != len(spec):
2242 if pos != len(spec):
2243 raise error.ParseError(_('invalid token'), pos)
2243 raise error.ParseError(_('invalid token'), pos)
2244 return parser.simplifyinfixops(tree, ('list', 'or'))
2244 return parser.simplifyinfixops(tree, ('list', 'or'))
2245
2245
2246 class _aliasrules(parser.basealiasrules):
2246 class _aliasrules(parser.basealiasrules):
2247 """Parsing and expansion rule set of revset aliases"""
2247 """Parsing and expansion rule set of revset aliases"""
2248 _section = _('revset alias')
2248 _section = _('revset alias')
2249
2249
2250 @staticmethod
2250 @staticmethod
2251 def _parse(spec):
2251 def _parse(spec):
2252 """Parse alias declaration/definition ``spec``
2252 """Parse alias declaration/definition ``spec``
2253
2253
2254 This allows symbol names to use also ``$`` as an initial letter
2254 This allows symbol names to use also ``$`` as an initial letter
2255 (for backward compatibility), and callers of this function should
2255 (for backward compatibility), and callers of this function should
2256 examine whether ``$`` is used also for unexpected symbols or not.
2256 examine whether ``$`` is used also for unexpected symbols or not.
2257 """
2257 """
2258 return _parsewith(spec, syminitletters=_aliassyminitletters)
2258 return _parsewith(spec, syminitletters=_aliassyminitletters)
2259
2259
2260 @staticmethod
2260 @staticmethod
2261 def _trygetfunc(tree):
2261 def _trygetfunc(tree):
2262 if tree[0] == 'func' and tree[1][0] == 'symbol':
2262 if tree[0] == 'func' and tree[1][0] == 'symbol':
2263 return tree[1][1], getlist(tree[2])
2263 return tree[1][1], getlist(tree[2])
2264
2264
2265 def expandaliases(ui, tree, showwarning=None):
2265 def expandaliases(ui, tree, showwarning=None):
2266 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2266 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2267 tree = _aliasrules.expand(aliases, tree)
2267 tree = _aliasrules.expand(aliases, tree)
2268 if showwarning:
2268 if showwarning:
2269 # warn about problematic (but not referred) aliases
2269 # warn about problematic (but not referred) aliases
2270 for name, alias in sorted(aliases.iteritems()):
2270 for name, alias in sorted(aliases.iteritems()):
2271 if alias.error and not alias.warned:
2271 if alias.error and not alias.warned:
2272 showwarning(_('warning: %s\n') % (alias.error))
2272 showwarning(_('warning: %s\n') % (alias.error))
2273 alias.warned = True
2273 alias.warned = True
2274 return tree
2274 return tree
2275
2275
2276 def foldconcat(tree):
2276 def foldconcat(tree):
2277 """Fold elements to be concatenated by `##`
2277 """Fold elements to be concatenated by `##`
2278 """
2278 """
2279 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2279 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2280 return tree
2280 return tree
2281 if tree[0] == '_concat':
2281 if tree[0] == '_concat':
2282 pending = [tree]
2282 pending = [tree]
2283 l = []
2283 l = []
2284 while pending:
2284 while pending:
2285 e = pending.pop()
2285 e = pending.pop()
2286 if e[0] == '_concat':
2286 if e[0] == '_concat':
2287 pending.extend(reversed(e[1:]))
2287 pending.extend(reversed(e[1:]))
2288 elif e[0] in ('string', 'symbol'):
2288 elif e[0] in ('string', 'symbol'):
2289 l.append(e[1])
2289 l.append(e[1])
2290 else:
2290 else:
2291 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2291 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2292 raise error.ParseError(msg)
2292 raise error.ParseError(msg)
2293 return ('string', ''.join(l))
2293 return ('string', ''.join(l))
2294 else:
2294 else:
2295 return tuple(foldconcat(t) for t in tree)
2295 return tuple(foldconcat(t) for t in tree)
2296
2296
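A recursive standalone rendering of the _concat flattening (the in-tree version above uses an explicit pending stack instead of recursion):

def fold(tree):
    if tree[0] in ('string', 'symbol'):
        return tree[1]
    assert tree[0] == '_concat'
    return ''.join(fold(t) for t in tree[1:])

t = ('_concat', ('string', 'release_'),
     ('_concat', ('symbol', '1'), ('string', '.0')))
print(('string', fold(t)))  # ('string', 'release_1.0')
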
2297 def parse(spec, lookup=None):
2297 def parse(spec, lookup=None):
2298 return _parsewith(spec, lookup=lookup)
2298 return _parsewith(spec, lookup=lookup)
2299
2299
2300 def posttreebuilthook(tree, repo):
2300 def posttreebuilthook(tree, repo):
2301 # hook for extensions to execute code on the optimized tree
2301 # hook for extensions to execute code on the optimized tree
2302 pass
2302 pass
2303
2303
2304 def match(ui, spec, repo=None):
2304 def match(ui, spec, repo=None):
2305 if not spec:
2305 if not spec:
2306 raise error.ParseError(_("empty query"))
2306 raise error.ParseError(_("empty query"))
2307 lookup = None
2307 lookup = None
2308 if repo:
2308 if repo:
2309 lookup = repo.__contains__
2309 lookup = repo.__contains__
2310 tree = parse(spec, lookup)
2310 tree = parse(spec, lookup)
2311 return _makematcher(ui, tree, repo)
2311 return _makematcher(ui, tree, repo)
2312
2312
2313 def matchany(ui, specs, repo=None):
2313 def matchany(ui, specs, repo=None):
2314 """Create a matcher that will include any revisions matching one of the
2314 """Create a matcher that will include any revisions matching one of the
2315 given specs"""
2315 given specs"""
2316 if not specs:
2316 if not specs:
2317 def mfunc(repo, subset=None):
2317 def mfunc(repo, subset=None):
2318 return baseset()
2318 return baseset()
2319 return mfunc
2319 return mfunc
2320 if not all(specs):
2320 if not all(specs):
2321 raise error.ParseError(_("empty query"))
2321 raise error.ParseError(_("empty query"))
2322 lookup = None
2322 lookup = None
2323 if repo:
2323 if repo:
2324 lookup = repo.__contains__
2324 lookup = repo.__contains__
2325 if len(specs) == 1:
2325 if len(specs) == 1:
2326 tree = parse(specs[0], lookup)
2326 tree = parse(specs[0], lookup)
2327 else:
2327 else:
2328 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2328 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2329 return _makematcher(ui, tree, repo)
2329 return _makematcher(ui, tree, repo)
2330
2330
2331 def _makematcher(ui, tree, repo):
2331 def _makematcher(ui, tree, repo):
2332 if ui:
2332 if ui:
2333 tree = expandaliases(ui, tree, showwarning=ui.warn)
2333 tree = expandaliases(ui, tree, showwarning=ui.warn)
2334 tree = foldconcat(tree)
2334 tree = foldconcat(tree)
2335 tree = optimize(tree)
2335 tree = optimize(tree)
2336 posttreebuilthook(tree, repo)
2336 posttreebuilthook(tree, repo)
2337 def mfunc(repo, subset=None):
2337 def mfunc(repo, subset=None):
2338 if subset is None:
2338 if subset is None:
2339 subset = fullreposet(repo)
2339 subset = fullreposet(repo)
2340 if util.safehasattr(subset, 'isascending'):
2340 if util.safehasattr(subset, 'isascending'):
2341 result = getset(repo, subset, tree)
2341 result = getset(repo, subset, tree)
2342 else:
2342 else:
2343 result = getset(repo, baseset(subset), tree)
2343 result = getset(repo, baseset(subset), tree)
2344 return result
2344 return result
2345 return mfunc
2345 return mfunc
2346
2346
2347 def formatspec(expr, *args):
2347 def formatspec(expr, *args):
2348 '''
2348 '''
2349 This is a convenience function for using revsets internally, and
2349 This is a convenience function for using revsets internally, and
2350 escapes arguments appropriately. Aliases are intentionally ignored
2350 escapes arguments appropriately. Aliases are intentionally ignored
2351 so that intended expression behavior isn't accidentally subverted.
2351 so that intended expression behavior isn't accidentally subverted.
2352
2352
2353 Supported arguments:
2353 Supported arguments:
2354
2354
2355 %r = revset expression, parenthesized
2355 %r = revset expression, parenthesized
2356 %d = int(arg), no quoting
2356 %d = int(arg), no quoting
2357 %s = string(arg), escaped and single-quoted
2357 %s = string(arg), escaped and single-quoted
2358 %b = arg.branch(), escaped and single-quoted
2358 %b = arg.branch(), escaped and single-quoted
2359 %n = hex(arg), single-quoted
2359 %n = hex(arg), single-quoted
2360 %% = a literal '%'
2360 %% = a literal '%'
2361
2361
2362 Prefixing the type with 'l' specifies a parenthesized list of that type.
2362 Prefixing the type with 'l' specifies a parenthesized list of that type.
2363
2363
2364 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2364 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2365 '(10 or 11):: and ((this()) or (that()))'
2365 '(10 or 11):: and ((this()) or (that()))'
2366 >>> formatspec('%d:: and not %d::', 10, 20)
2366 >>> formatspec('%d:: and not %d::', 10, 20)
2367 '10:: and not 20::'
2367 '10:: and not 20::'
2368 >>> formatspec('%ld or %ld', [], [1])
2368 >>> formatspec('%ld or %ld', [], [1])
2369 "_list('') or 1"
2369 "_list('') or 1"
2370 >>> formatspec('keyword(%s)', 'foo\\xe9')
2370 >>> formatspec('keyword(%s)', 'foo\\xe9')
2371 "keyword('foo\\\\xe9')"
2371 "keyword('foo\\\\xe9')"
2372 >>> b = lambda: 'default'
2372 >>> b = lambda: 'default'
2373 >>> b.branch = b
2373 >>> b.branch = b
2374 >>> formatspec('branch(%b)', b)
2374 >>> formatspec('branch(%b)', b)
2375 "branch('default')"
2375 "branch('default')"
2376 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2376 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2377 "root(_list('a\\x00b\\x00c\\x00d'))"
2377 "root(_list('a\\x00b\\x00c\\x00d'))"
2378 '''
2378 '''
2379
2379
2380 def quote(s):
2380 def quote(s):
2381 return repr(str(s))
2381 return repr(str(s))
2382
2382
2383 def argtype(c, arg):
2383 def argtype(c, arg):
2384 if c == 'd':
2384 if c == 'd':
2385 return str(int(arg))
2385 return str(int(arg))
2386 elif c == 's':
2386 elif c == 's':
2387 return quote(arg)
2387 return quote(arg)
2388 elif c == 'r':
2388 elif c == 'r':
2389 parse(arg) # make sure syntax errors are confined
2389 parse(arg) # make sure syntax errors are confined
2390 return '(%s)' % arg
2390 return '(%s)' % arg
2391 elif c == 'n':
2391 elif c == 'n':
2392 return quote(node.hex(arg))
2392 return quote(node.hex(arg))
2393 elif c == 'b':
2393 elif c == 'b':
2394 return quote(arg.branch())
2394 return quote(arg.branch())
2395
2395
2396 def listexp(s, t):
2396 def listexp(s, t):
2397 l = len(s)
2397 l = len(s)
2398 if l == 0:
2398 if l == 0:
2399 return "_list('')"
2399 return "_list('')"
2400 elif l == 1:
2400 elif l == 1:
2401 return argtype(t, s[0])
2401 return argtype(t, s[0])
2402 elif t == 'd':
2402 elif t == 'd':
2403 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2403 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2404 elif t == 's':
2404 elif t == 's':
2405 return "_list('%s')" % "\0".join(s)
2405 return "_list('%s')" % "\0".join(s)
2406 elif t == 'n':
2406 elif t == 'n':
2407 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2407 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2408 elif t == 'b':
2408 elif t == 'b':
2409 return "_list('%s')" % "\0".join(a.branch() for a in s)
2409 return "_list('%s')" % "\0".join(a.branch() for a in s)
2410
2410
2411 m = l // 2
2411 m = l // 2
2412 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2412 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2413
2413
2414 ret = ''
2414 ret = ''
2415 pos = 0
2415 pos = 0
2416 arg = 0
2416 arg = 0
2417 while pos < len(expr):
2417 while pos < len(expr):
2418 c = expr[pos]
2418 c = expr[pos]
2419 if c == '%':
2419 if c == '%':
2420 pos += 1
2420 pos += 1
2421 d = expr[pos]
2421 d = expr[pos]
2422 if d == '%':
2422 if d == '%':
2423 ret += d
2423 ret += d
2424 elif d in 'dsnbr':
2424 elif d in 'dsnbr':
2425 ret += argtype(d, args[arg])
2425 ret += argtype(d, args[arg])
2426 arg += 1
2426 arg += 1
2427 elif d == 'l':
2427 elif d == 'l':
2428 # a list of some type
2428 # a list of some type
2429 pos += 1
2429 pos += 1
2430 d = expr[pos]
2430 d = expr[pos]
2431 ret += listexp(list(args[arg]), d)
2431 ret += listexp(list(args[arg]), d)
2432 arg += 1
2432 arg += 1
2433 else:
2433 else:
2434 raise error.Abort('unexpected revspec format character %s' % d)
2434 raise error.Abort('unexpected revspec format character %s' % d)
2435 else:
2435 else:
2436 ret += c
2436 ret += c
2437 pos += 1
2437 pos += 1
2438
2438
2439 return ret
2439 return ret
2440
2440
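One detail of listexp deserves a closer look: for types without a bulk _list-style form (such as %lr), long lists are split recursively into a balanced tree of 'or' pairs, keeping nesting depth logarithmic in the list length rather than building one deep chain. Standalone:

def balanced_or(exprs):
    l = len(exprs)
    if l == 0:
        return "_list('')"
    if l == 1:
        return exprs[0]
    m = l // 2
    return '(%s or %s)' % (balanced_or(exprs[:m]), balanced_or(exprs[m:]))

print(balanced_or(['a', 'b', 'c', 'd']))  # ((a or b) or (c or d))
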
2441 def prettyformat(tree):
2441 def prettyformat(tree):
2442 return parser.prettyformat(tree, ('string', 'symbol'))
2442 return parser.prettyformat(tree, ('string', 'symbol'))
2443
2443
2444 def depth(tree):
2444 def depth(tree):
2445 if isinstance(tree, tuple):
2445 if isinstance(tree, tuple):
2446 return max(map(depth, tree)) + 1
2446 return max(map(depth, tree)) + 1
2447 else:
2447 else:
2448 return 0
2448 return 0
2449
2449
2450 def funcsused(tree):
2450 def funcsused(tree):
2451 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2451 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2452 return set()
2452 return set()
2453 else:
2453 else:
2454 funcs = set()
2454 funcs = set()
2455 for s in tree[1:]:
2455 for s in tree[1:]:
2456 funcs |= funcsused(s)
2456 funcs |= funcsused(s)
2457 if tree[0] == 'func':
2457 if tree[0] == 'func':
2458 funcs.add(tree[1][1])
2458 funcs.add(tree[1][1])
2459 return funcs
2459 return funcs
2460
2460
2461 def _formatsetrepr(r):
2461 def _formatsetrepr(r):
2462 """Format an optional printable representation of a set
2462 """Format an optional printable representation of a set
2463
2463
2464 ======== =================================
2464 ======== =================================
2465 type(r) example
2465 type(r) example
2466 ======== =================================
2466 ======== =================================
2467 tuple ('<not %r>', other)
2467 tuple ('<not %r>', other)
2468 str '<branch closed>'
2468 str '<branch closed>'
2469 callable lambda: '<branch %r>' % sorted(b)
2469 callable lambda: '<branch %r>' % sorted(b)
2470 object other
2470 object other
2471 ======== =================================
2471 ======== =================================
2472 """
2472 """
2473 if r is None:
2473 if r is None:
2474 return ''
2474 return ''
2475 elif isinstance(r, tuple):
2475 elif isinstance(r, tuple):
2476 return r[0] % r[1:]
2476 return r[0] % r[1:]
2477 elif isinstance(r, str):
2477 elif isinstance(r, str):
2478 return r
2478 return r
2479 elif callable(r):
2479 elif callable(r):
2480 return r()
2480 return r()
2481 else:
2481 else:
2482 return repr(r)
2482 return repr(r)
2483
2483
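The table above maps to four branches; exercising a standalone copy shows each accepted form:

def formatsetrepr(r):
    if r is None:
        return ''
    elif isinstance(r, tuple):
        return r[0] % r[1:]       # lazy %-formatting
    elif isinstance(r, str):
        return r
    elif callable(r):
        return r()                # defer building the string
    else:
        return repr(r)

print(formatsetrepr(('<not %r>', 'other')))        # <not 'other'>
print(formatsetrepr('<branch closed>'))            # <branch closed>
print(formatsetrepr(lambda: '<branch %r>' % 'x'))  # <branch 'x'>
print(formatsetrepr(42))                           # 42
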
2484 class abstractsmartset(object):
2484 class abstractsmartset(object):
2485
2485
2486 def __nonzero__(self):
2486 def __nonzero__(self):
2487 """True if the smartset is not empty"""
2487 """True if the smartset is not empty"""
2488 raise NotImplementedError()
2488 raise NotImplementedError()
2489
2489
2490 def __contains__(self, rev):
2490 def __contains__(self, rev):
2491 """provide fast membership testing"""
2491 """provide fast membership testing"""
2492 raise NotImplementedError()
2492 raise NotImplementedError()
2493
2493
2494 def __iter__(self):
2494 def __iter__(self):
2495 """iterate the set in the order it is supposed to be iterated"""
2495 """iterate the set in the order it is supposed to be iterated"""
2496 raise NotImplementedError()
2496 raise NotImplementedError()
2497
2497
2498 # Attributes containing a function to perform a fast iteration in a given
2498 # Attributes containing a function to perform a fast iteration in a given
2499 # direction. A smartset can have none, one, or both defined.
2499 # direction. A smartset can have none, one, or both defined.
2500 #
2500 #
2501 # Default value is None instead of a function returning None to avoid
2501 # Default value is None instead of a function returning None to avoid
2502 # initializing an iterator just for testing if a fast method exists.
2502 # initializing an iterator just for testing if a fast method exists.
2503 fastasc = None
2503 fastasc = None
2504 fastdesc = None
2504 fastdesc = None
2505
2505
2506 def isascending(self):
2506 def isascending(self):
2507 """True if the set will iterate in ascending order"""
2507 """True if the set will iterate in ascending order"""
2508 raise NotImplementedError()
2508 raise NotImplementedError()
2509
2509
2510 def isdescending(self):
2510 def isdescending(self):
2511 """True if the set will iterate in descending order"""
2511 """True if the set will iterate in descending order"""
2512 raise NotImplementedError()
2512 raise NotImplementedError()
2513
2513
2514 @util.cachefunc
2514 @util.cachefunc
2515 def min(self):
2515 def min(self):
2516 """return the minimum element in the set"""
2516 """return the minimum element in the set"""
2517 if self.fastasc is not None:
2517 if self.fastasc is not None:
2518 for r in self.fastasc():
2518 for r in self.fastasc():
2519 return r
2519 return r
2520 raise ValueError('arg is an empty sequence')
2520 raise ValueError('arg is an empty sequence')
2521 return min(self)
2521 return min(self)
2522
2522
2523 @util.cachefunc
2523 @util.cachefunc
2524 def max(self):
2524 def max(self):
2525 """return the maximum element in the set"""
2525 """return the maximum element in the set"""
2526 if self.fastdesc is not None:
2526 if self.fastdesc is not None:
2527 for r in self.fastdesc():
2527 for r in self.fastdesc():
2528 return r
2528 return r
2529 raise ValueError('arg is an empty sequence')
2529 raise ValueError('arg is an empty sequence')
2530 return max(self)
2530 return max(self)
2531
2531
2532 def first(self):
2532 def first(self):
2533 """return the first element in the set (user iteration perspective)
2533 """return the first element in the set (user iteration perspective)
2534
2534
2535 Return None if the set is empty"""
2535 Return None if the set is empty"""
2536 raise NotImplementedError()
2536 raise NotImplementedError()
2537
2537
2538 def last(self):
2538 def last(self):
2539 """return the last element in the set (user iteration perspective)
2539 """return the last element in the set (user iteration perspective)
2540
2540
2541 Return None if the set is empty"""
2541 Return None if the set is empty"""
2542 raise NotImplementedError()
2542 raise NotImplementedError()
2543
2543
2544 def __len__(self):
2544 def __len__(self):
2545 """return the length of the smartsets
2545 """return the length of the smartsets
2546
2546
2547 This can be expensive on a smartset that could otherwise be lazy."""
2547 This can be expensive on a smartset that could otherwise be lazy."""
2548 raise NotImplementedError()
2548 raise NotImplementedError()
2549
2549
2550 def reverse(self):
2550 def reverse(self):
2551 """reverse the expected iteration order"""
2551 """reverse the expected iteration order"""
2552 raise NotImplementedError()
2552 raise NotImplementedError()
2553
2553
2554 def sort(self, reverse=True):
2554 def sort(self, reverse=True):
2555 """get the set to iterate in an ascending or descending order"""
2555 """get the set to iterate in an ascending or descending order"""
2556 raise NotImplementedError()
2556 raise NotImplementedError()
2557
2557
2558 def __and__(self, other):
2558 def __and__(self, other):
2559 """Returns a new object with the intersection of the two collections.
2559 """Returns a new object with the intersection of the two collections.
2560
2560
2561 This is part of the mandatory API for smartset."""
2561 This is part of the mandatory API for smartset."""
2562 if isinstance(other, fullreposet):
2562 if isinstance(other, fullreposet):
2563 return self
2563 return self
2564 return self.filter(other.__contains__, condrepr=other, cache=False)
2564 return self.filter(other.__contains__, condrepr=other, cache=False)
2565
2565
2566 def __add__(self, other):
2566 def __add__(self, other):
2567 """Returns a new object with the union of the two collections.
2567 """Returns a new object with the union of the two collections.
2568
2568
2569 This is part of the mandatory API for smartset."""
2569 This is part of the mandatory API for smartset."""
2570 return addset(self, other)
2570 return addset(self, other)
2571
2571
2572 def __sub__(self, other):
2572 def __sub__(self, other):
2573 """Returns a new object with the substraction of the two collections.
2573 """Returns a new object with the substraction of the two collections.
2574
2574
2575 This is part of the mandatory API for smartset."""
2575 This is part of the mandatory API for smartset."""
2576 c = other.__contains__
2576 c = other.__contains__
2577 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2577 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2578 cache=False)
2578 cache=False)
2579
2579
2580 def filter(self, condition, condrepr=None, cache=True):
2580 def filter(self, condition, condrepr=None, cache=True):
2581 """Returns this smartset filtered by condition as a new smartset.
2581 """Returns this smartset filtered by condition as a new smartset.
2582
2582
2583 `condition` is a callable which takes a revision number and returns a
2583 `condition` is a callable which takes a revision number and returns a
2584 boolean. Optional `condrepr` provides a printable representation of
2584 boolean. Optional `condrepr` provides a printable representation of
2585 the given `condition`.
2585 the given `condition`.
2586
2586
2587 This is part of the mandatory API for smartset."""
2587 This is part of the mandatory API for smartset."""
2588 # a builtin function cannot be cached, but it does not need to be
2588 # a builtin function cannot be cached, but it does not need to be
2589 if cache and util.safehasattr(condition, 'func_code'):
2589 if cache and util.safehasattr(condition, 'func_code'):
2590 condition = util.cachefunc(condition)
2590 condition = util.cachefunc(condition)
2591 return filteredset(self, condition, condrepr)
2591 return filteredset(self, condition, condrepr)
2592
2592
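The point of the optional fastasc/fastdesc attributes shows up in min() above: when an ascending iterator exists, the minimum is simply its first element, so nothing beyond one value is ever materialized. A standalone analogue:

def smartmin(fastasc=None, fallback=()):
    if fastasc is not None:
        for r in fastasc():       # pull exactly one element
            return r
        raise ValueError('arg is an empty sequence')
    return min(fallback)          # last resort: full iteration

print(smartmin(fastasc=lambda: iter([1, 5, 9])))  # 1
print(smartmin(fallback=[9, 5, 1]))               # 1
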
2593 class baseset(abstractsmartset):
2593 class baseset(abstractsmartset):
2594 """Basic data structure that represents a revset and contains the basic
2594 """Basic data structure that represents a revset and contains the basic
2595 operations that it should be able to perform.
2595 operations that it should be able to perform.
2596
2596
2597 Every method in this class should be implemented by any smartset class.
2597 Every method in this class should be implemented by any smartset class.
2598 """
2598 """
2599 def __init__(self, data=(), datarepr=None):
2599 def __init__(self, data=(), datarepr=None):
2600 """
2600 """
2601 datarepr: a tuple of (format, obj, ...), a function or an object that
2601 datarepr: a tuple of (format, obj, ...), a function or an object that
2602 provides a printable representation of the given data.
2602 provides a printable representation of the given data.
2603 """
2603 """
2604 self._ascending = None
2604 self._ascending = None
2605 if not isinstance(data, list):
2605 if not isinstance(data, list):
2606 if isinstance(data, set):
2606 if isinstance(data, set):
2607 self._set = data
2607 self._set = data
2608 # a set has no order; we pick one for stability purposes
2608 # a set has no order; we pick one for stability purposes
2609 self._ascending = True
2609 self._ascending = True
2610 data = list(data)
2610 data = list(data)
2611 self._list = data
2611 self._list = data
2612 self._datarepr = datarepr
2612 self._datarepr = datarepr
2613
2613
2614 @util.propertycache
2614 @util.propertycache
2615 def _set(self):
2615 def _set(self):
2616 return set(self._list)
2616 return set(self._list)
2617
2617
2618 @util.propertycache
2618 @util.propertycache
2619 def _asclist(self):
2619 def _asclist(self):
2620 asclist = self._list[:]
2620 asclist = self._list[:]
2621 asclist.sort()
2621 asclist.sort()
2622 return asclist
2622 return asclist
2623
2623
2624 def __iter__(self):
2624 def __iter__(self):
2625 if self._ascending is None:
2625 if self._ascending is None:
2626 return iter(self._list)
2626 return iter(self._list)
2627 elif self._ascending:
2627 elif self._ascending:
2628 return iter(self._asclist)
2628 return iter(self._asclist)
2629 else:
2629 else:
2630 return reversed(self._asclist)
2630 return reversed(self._asclist)
2631
2631
2632 def fastasc(self):
2632 def fastasc(self):
2633 return iter(self._asclist)
2633 return iter(self._asclist)
2634
2634
2635 def fastdesc(self):
2635 def fastdesc(self):
2636 return reversed(self._asclist)
2636 return reversed(self._asclist)
2637
2637
2638 @util.propertycache
2638 @util.propertycache
2639 def __contains__(self):
2639 def __contains__(self):
2640 return self._set.__contains__
2640 return self._set.__contains__
2641
2641
2642 def __nonzero__(self):
2642 def __nonzero__(self):
2643 return bool(self._list)
2643 return bool(self._list)
2644
2644
2645 def sort(self, reverse=False):
2645 def sort(self, reverse=False):
2646 self._ascending = not bool(reverse)
2646 self._ascending = not bool(reverse)
2647
2647
2648 def reverse(self):
2648 def reverse(self):
2649 if self._ascending is None:
2649 if self._ascending is None:
2650 self._list.reverse()
2650 self._list.reverse()
2651 else:
2651 else:
2652 self._ascending = not self._ascending
2652 self._ascending = not self._ascending
2653
2653
2654 def __len__(self):
2654 def __len__(self):
2655 return len(self._list)
2655 return len(self._list)
2656
2656
2657 def isascending(self):
2657 def isascending(self):
2658 """Returns True if the collection is ascending order, False if not.
2658 """Returns True if the collection is ascending order, False if not.
2659
2659
2660 This is part of the mandatory API for smartset."""
2660 This is part of the mandatory API for smartset."""
2661 if len(self) <= 1:
2661 if len(self) <= 1:
2662 return True
2662 return True
2663 return self._ascending is not None and self._ascending
2663 return self._ascending is not None and self._ascending
2664
2664
2665 def isdescending(self):
2665 def isdescending(self):
2666 """Returns True if the collection is descending order, False if not.
2666 """Returns True if the collection is descending order, False if not.
2667
2667
2668 This is part of the mandatory API for smartset."""
2668 This is part of the mandatory API for smartset."""
2669 if len(self) <= 1:
2669 if len(self) <= 1:
2670 return True
2670 return True
2671 return self._ascending is not None and not self._ascending
2671 return self._ascending is not None and not self._ascending
2672
2672
2673 def first(self):
2673 def first(self):
2674 if self:
2674 if self:
2675 if self._ascending is None:
2675 if self._ascending is None:
2676 return self._list[0]
2676 return self._list[0]
2677 elif self._ascending:
2677 elif self._ascending:
2678 return self._asclist[0]
2678 return self._asclist[0]
2679 else:
2679 else:
2680 return self._asclist[-1]
2680 return self._asclist[-1]
2681 return None
2681 return None
2682
2682
2683 def last(self):
2683 def last(self):
2684 if self:
2684 if self:
2685 if self._ascending is None:
2685 if self._ascending is None:
2686 return self._list[-1]
2686 return self._list[-1]
2687 elif self._ascending:
2687 elif self._ascending:
2688 return self._asclist[-1]
2688 return self._asclist[-1]
2689 else:
2689 else:
2690 return self._asclist[0]
2690 return self._asclist[0]
2691 return None
2691 return None
2692
2692
2693 def __repr__(self):
2693 def __repr__(self):
2694 d = {None: '', False: '-', True: '+'}[self._ascending]
2694 d = {None: '', False: '-', True: '+'}[self._ascending]
2695 s = _formatsetrepr(self._datarepr)
2695 s = _formatsetrepr(self._datarepr)
2696 if not s:
2696 if not s:
2697 l = self._list
2697 l = self._list
2698 # if _list has been built from a set, it might have a different
2698 # if _list has been built from a set, it might have a different
2699 # order from one python implementation to another.
2699 # order from one python implementation to another.
2700 # We fall back to the sorted version for a stable output.
2700 # We fall back to the sorted version for a stable output.
2701 if self._ascending is not None:
2701 if self._ascending is not None:
2702 l = self._asclist
2702 l = self._asclist
2703 s = repr(l)
2703 s = repr(l)
2704 return '<%s%s %s>' % (type(self).__name__, d, s)
2704 return '<%s%s %s>' % (type(self).__name__, d, s)
2705
2705
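baseset keeps one cached ascending copy (_asclist) plus a direction flag, so after the first sort, sort() and reverse() are O(1) state flips rather than re-sorts. A compact illustration of the three iteration states:

data = [0, 3, 2]           # insertion order, as passed to baseset()
asclist = sorted(data)     # built lazily once in the real class

def iterate(ascending):
    if ascending is None:  # no sort requested: keep the given order
        return list(data)
    return list(asclist) if ascending else asclist[::-1]

print(iterate(None), iterate(True), iterate(False))
# [0, 3, 2] [0, 2, 3] [3, 2, 0]
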
2706 class filteredset(abstractsmartset):
2706 class filteredset(abstractsmartset):
2707 """Duck type for baseset class which iterates lazily over the revisions in
2707 """Duck type for baseset class which iterates lazily over the revisions in
2708 the subset and contains a function which tests for membership in the
2708 the subset and contains a function which tests for membership in the
2709 revset
2709 revset
2710 """
2710 """
2711 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2711 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2712 """
2712 """
2713 condition: a function that decides whether a revision in the subset
2713 condition: a function that decides whether a revision in the subset
2714 belongs to the revset or not.
2714 belongs to the revset or not.
2715 condrepr: a tuple of (format, obj, ...), a function or an object that
2715 condrepr: a tuple of (format, obj, ...), a function or an object that
2716 provides a printable representation of the given condition.
2716 provides a printable representation of the given condition.
2717 """
2717 """
2718 self._subset = subset
2718 self._subset = subset
2719 self._condition = condition
2719 self._condition = condition
2720 self._condrepr = condrepr
2720 self._condrepr = condrepr
2721
2721
2722 def __contains__(self, x):
2722 def __contains__(self, x):
2723 return x in self._subset and self._condition(x)
2723 return x in self._subset and self._condition(x)
2724
2724
2725 def __iter__(self):
2725 def __iter__(self):
2726 return self._iterfilter(self._subset)
2726 return self._iterfilter(self._subset)
2727
2727
2728 def _iterfilter(self, it):
2728 def _iterfilter(self, it):
2729 cond = self._condition
2729 cond = self._condition
2730 for x in it:
2730 for x in it:
2731 if cond(x):
2731 if cond(x):
2732 yield x
2732 yield x
2733
2733
2734 @property
2734 @property
2735 def fastasc(self):
2735 def fastasc(self):
2736 it = self._subset.fastasc
2736 it = self._subset.fastasc
2737 if it is None:
2737 if it is None:
2738 return None
2738 return None
2739 return lambda: self._iterfilter(it())
2739 return lambda: self._iterfilter(it())
2740
2740
2741 @property
2741 @property
2742 def fastdesc(self):
2742 def fastdesc(self):
2743 it = self._subset.fastdesc
2743 it = self._subset.fastdesc
2744 if it is None:
2744 if it is None:
2745 return None
2745 return None
2746 return lambda: self._iterfilter(it())
2746 return lambda: self._iterfilter(it())
2747
2747
2748 def __nonzero__(self):
2748 def __nonzero__(self):
2749 fast = self.fastasc
2749 fast = self.fastasc
2750 if fast is None:
2750 if fast is None:
2751 fast = self.fastdesc
2751 fast = self.fastdesc
2752 if fast is not None:
2752 if fast is not None:
2753 it = fast()
2753 it = fast()
2754 else:
2754 else:
2755 it = self
2755 it = self
2756
2756
2757 for r in it:
2757 for r in it:
2758 return True
2758 return True
2759 return False
2759 return False
2760
2760
2761 def __len__(self):
2761 def __len__(self):
2762 # Basic implementation to be changed in future patches.
2762 # Basic implementation to be changed in future patches.
2763 # until this gets improved, we use a generator expression
2763 # until this gets improved, we use a generator expression
2764 # here, since a list comprehension is free to call __len__ again,
2764 # here, since a list comprehension is free to call __len__ again,
2765 # causing infinite recursion
2765 # causing infinite recursion
2766 l = baseset(r for r in self)
2766 l = baseset(r for r in self)
2767 return len(l)
2767 return len(l)
2768
2768
2769 def sort(self, reverse=False):
2769 def sort(self, reverse=False):
2770 self._subset.sort(reverse=reverse)
2770 self._subset.sort(reverse=reverse)
2771
2771
2772 def reverse(self):
2772 def reverse(self):
2773 self._subset.reverse()
2773 self._subset.reverse()
2774
2774
2775 def isascending(self):
2775 def isascending(self):
2776 return self._subset.isascending()
2776 return self._subset.isascending()
2777
2777
2778 def isdescending(self):
2778 def isdescending(self):
2779 return self._subset.isdescending()
2779 return self._subset.isdescending()
2780
2780
2781 def first(self):
2781 def first(self):
2782 for x in self:
2782 for x in self:
2783 return x
2783 return x
2784 return None
2784 return None
2785
2785
2786 def last(self):
2786 def last(self):
2787 it = None
2787 it = None
2788 if self.isascending():
2788 if self.isascending():
2789 it = self.fastdesc
2789 it = self.fastdesc
2790 elif self.isdescending():
2790 elif self.isdescending():
2791 it = self.fastasc
2791 it = self.fastasc
2792 if it is not None:
2792 if it is not None:
2793 for x in it():
2793 for x in it():
2794 return x
2794 return x
2795 return None # empty case
2795 return None # empty case
2796 else:
2796 else:
2797 x = None
2797 x = None
2798 for x in self:
2798 for x in self:
2799 pass
2799 pass
2800 return x
2800 return x
2801
2801
2802 def __repr__(self):
2802 def __repr__(self):
2803 xs = [repr(self._subset)]
2803 xs = [repr(self._subset)]
2804 s = _formatsetrepr(self._condrepr)
2804 s = _formatsetrepr(self._condrepr)
2805 if s:
2805 if s:
2806 xs.append(s)
2806 xs.append(s)
2807 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
2807 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
2808
2808
2809 def _iterordered(ascending, iter1, iter2):
2809 def _iterordered(ascending, iter1, iter2):
2810 """produce an ordered iteration from two iterators with the same order
2810 """produce an ordered iteration from two iterators with the same order
2811
2811
2812 The ascending parameter indicates the iteration direction.
2812 The ascending parameter indicates the iteration direction.
2813 """
2813 """
2814 choice = max
2814 choice = max
2815 if ascending:
2815 if ascending:
2816 choice = min
2816 choice = min
2817
2817
2818 val1 = None
2818 val1 = None
2819 val2 = None
2819 val2 = None
2820 try:
2820 try:
2821 # Consume both iterators in an ordered way until one is empty
2821 # Consume both iterators in an ordered way until one is empty
2822 while True:
2822 while True:
2823 if val1 is None:
2823 if val1 is None:
2824 val1 = iter1.next()
2824 val1 = next(iter1)
2825 if val2 is None:
2825 if val2 is None:
2826 val2 = iter2.next()
2826 val2 = next(iter2)
2827 n = choice(val1, val2)
2827 n = choice(val1, val2)
2828 yield n
2828 yield n
2829 if val1 == n:
2829 if val1 == n:
2830 val1 = None
2830 val1 = None
2831 if val2 == n:
2831 if val2 == n:
2832 val2 = None
2832 val2 = None
2833 except StopIteration:
2833 except StopIteration:
2834 # Flush any remaining values and consume the other one
2834 # Flush any remaining values and consume the other one
2835 it = iter2
2835 it = iter2
2836 if val1 is not None:
2836 if val1 is not None:
2837 yield val1
2837 yield val1
2838 it = iter1
2838 it = iter1
2839 elif val2 is not None:
2839 elif val2 is not None:
2840 # might have been equality and both are empty
2840 # might have been equality and both are empty
2841 yield val2
2841 yield val2
2842 for val in it:
2842 for val in it:
2843 yield val
2843 yield val
2844
2844
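Since _iterordered is the function touched by the py3 conversion in this change, a standalone run of the ascending case makes a useful smoke test; note that a value present in both inputs is yielded only once:

def iterordered(iter1, iter2):      # ascending variant of the above
    val1 = val2 = None
    try:
        while True:
            if val1 is None:
                val1 = next(iter1)  # the py3-friendly spelling
            if val2 is None:
                val2 = next(iter2)
            n = min(val1, val2)
            yield n
            if val1 == n:           # both reset when equal: no duplicate
                val1 = None
            if val2 == n:
                val2 = None
    except StopIteration:
        it = iter2
        if val1 is not None:
            yield val1
            it = iter1
        elif val2 is not None:
            yield val2
        for val in it:
            yield val

print(list(iterordered(iter([0, 2, 3]), iter([2, 4, 5]))))  # [0, 2, 3, 4, 5]
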
2845 class addset(abstractsmartset):
2845 class addset(abstractsmartset):
2846 """Represent the addition of two sets
2846 """Represent the addition of two sets
2847
2847
2848 Wrapper structure for lazily adding two structures without losing much
2848 Wrapper structure for lazily adding two structures without losing much
2849 performance on the __contains__ method
2849 performance on the __contains__ method
2850
2850
2851 If the ascending attribute is set, that means the two structures are
2851 If the ascending attribute is set, that means the two structures are
2852 ordered in either an ascending or descending way. Therefore, we can add
2852 ordered in either an ascending or descending way. Therefore, we can add
2853 them while maintaining the order by iterating over both at the same time
2853 them while maintaining the order by iterating over both at the same time
2854
2854
2855 >>> xs = baseset([0, 3, 2])
2855 >>> xs = baseset([0, 3, 2])
2856 >>> ys = baseset([5, 2, 4])
2856 >>> ys = baseset([5, 2, 4])
2857
2857
2858 >>> rs = addset(xs, ys)
2858 >>> rs = addset(xs, ys)
2859 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
2859 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
2860 (True, True, False, True, 0, 4)
2860 (True, True, False, True, 0, 4)
2861 >>> rs = addset(xs, baseset([]))
2861 >>> rs = addset(xs, baseset([]))
2862 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
2862 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
2863 (True, True, False, 0, 2)
2863 (True, True, False, 0, 2)
2864 >>> rs = addset(baseset([]), baseset([]))
2864 >>> rs = addset(baseset([]), baseset([]))
2865 >>> bool(rs), 0 in rs, rs.first(), rs.last()
2865 >>> bool(rs), 0 in rs, rs.first(), rs.last()
2866 (False, False, None, None)
2866 (False, False, None, None)
2867
2867
2868 iterate unsorted:
2868 iterate unsorted:
2869 >>> rs = addset(xs, ys)
2869 >>> rs = addset(xs, ys)
2870 >>> # (use generator because pypy could call len())
2870 >>> # (use generator because pypy could call len())
2871 >>> list(x for x in rs) # without _genlist
2871 >>> list(x for x in rs) # without _genlist
2872 [0, 3, 2, 5, 4]
2872 [0, 3, 2, 5, 4]
2873 >>> assert not rs._genlist
2873 >>> assert not rs._genlist
2874 >>> len(rs)
2874 >>> len(rs)
2875 5
2875 5
2876 >>> [x for x in rs] # with _genlist
2876 >>> [x for x in rs] # with _genlist
2877 [0, 3, 2, 5, 4]
2877 [0, 3, 2, 5, 4]
2878 >>> assert rs._genlist
2878 >>> assert rs._genlist
2879
2879
2880 iterate ascending:
2880 iterate ascending:
2881 >>> rs = addset(xs, ys, ascending=True)
2881 >>> rs = addset(xs, ys, ascending=True)
2882 >>> # (use generator because pypy could call len())
2882 >>> # (use generator because pypy could call len())
2883 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
2883 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
2884 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2884 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2885 >>> assert not rs._asclist
2885 >>> assert not rs._asclist
2886 >>> len(rs)
2886 >>> len(rs)
2887 5
2887 5
2888 >>> [x for x in rs], [x for x in rs.fastasc()]
2888 >>> [x for x in rs], [x for x in rs.fastasc()]
2889 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2889 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2890 >>> assert rs._asclist
2890 >>> assert rs._asclist
2891
2891
2892 iterate descending:
2892 iterate descending:
2893 >>> rs = addset(xs, ys, ascending=False)
2893 >>> rs = addset(xs, ys, ascending=False)
2894 >>> # (use generator because pypy could call len())
2894 >>> # (use generator because pypy could call len())
2895 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
2895 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
2896 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2896 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2897 >>> assert not rs._asclist
2897 >>> assert not rs._asclist
2898 >>> len(rs)
2898 >>> len(rs)
2899 5
2899 5
2900 >>> [x for x in rs], [x for x in rs.fastdesc()]
2900 >>> [x for x in rs], [x for x in rs.fastdesc()]
2901 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2901 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2902 >>> assert rs._asclist
2902 >>> assert rs._asclist
2903
2903
2904 iterate ascending without fastasc:
2904 iterate ascending without fastasc:
2905 >>> rs = addset(xs, generatorset(ys), ascending=True)
2905 >>> rs = addset(xs, generatorset(ys), ascending=True)
2906 >>> assert rs.fastasc is None
2906 >>> assert rs.fastasc is None
2907 >>> [x for x in rs]
2907 >>> [x for x in rs]
2908 [0, 2, 3, 4, 5]
2908 [0, 2, 3, 4, 5]
2909
2909
2910 iterate descending without fastdesc:
2910 iterate descending without fastdesc:
2911 >>> rs = addset(generatorset(xs), ys, ascending=False)
2911 >>> rs = addset(generatorset(xs), ys, ascending=False)
2912 >>> assert rs.fastdesc is None
2912 >>> assert rs.fastdesc is None
2913 >>> [x for x in rs]
2913 >>> [x for x in rs]
2914 [5, 4, 3, 2, 0]
2914 [5, 4, 3, 2, 0]
2915 """
2915 """
2916 def __init__(self, revs1, revs2, ascending=None):
2916 def __init__(self, revs1, revs2, ascending=None):
2917 self._r1 = revs1
2917 self._r1 = revs1
2918 self._r2 = revs2
2918 self._r2 = revs2
2919 self._iter = None
2919 self._iter = None
2920 self._ascending = ascending
2920 self._ascending = ascending
2921 self._genlist = None
2921 self._genlist = None
2922 self._asclist = None
2922 self._asclist = None
2923
2923
2924 def __len__(self):
2924 def __len__(self):
2925 return len(self._list)
2925 return len(self._list)
2926
2926
2927 def __nonzero__(self):
2927 def __nonzero__(self):
2928 return bool(self._r1) or bool(self._r2)
2928 return bool(self._r1) or bool(self._r2)
2929
2929
2930 @util.propertycache
2930 @util.propertycache
2931 def _list(self):
2931 def _list(self):
2932 if not self._genlist:
2932 if not self._genlist:
2933 self._genlist = baseset(iter(self))
2933 self._genlist = baseset(iter(self))
2934 return self._genlist
2934 return self._genlist
2935
2935
2936 def __iter__(self):
2936 def __iter__(self):
2937 """Iterate over both collections without repeating elements
2937 """Iterate over both collections without repeating elements
2938
2938
2939 If the ascending attribute is not set, iterate over the first one and
2939 If the ascending attribute is not set, iterate over the first one and
2940 then over the second one checking for membership on the first one so we
2940 then over the second one checking for membership on the first one so we
2941 don't yield any duplicates.
2941 don't yield any duplicates.
2942
2942
2943 If the ascending attribute is set, iterate over both collections at the
2943 If the ascending attribute is set, iterate over both collections at the
2944 same time, yielding only one value at a time in the given order.
2944 same time, yielding only one value at a time in the given order.
2945 """
2945 """
2946 if self._ascending is None:
2946 if self._ascending is None:
2947 if self._genlist:
2947 if self._genlist:
2948 return iter(self._genlist)
2948 return iter(self._genlist)
2949 def arbitraryordergen():
2949 def arbitraryordergen():
2950 for r in self._r1:
2950 for r in self._r1:
2951 yield r
2951 yield r
2952 inr1 = self._r1.__contains__
2952 inr1 = self._r1.__contains__
2953 for r in self._r2:
2953 for r in self._r2:
2954 if not inr1(r):
2954 if not inr1(r):
2955 yield r
2955 yield r
2956 return arbitraryordergen()
2956 return arbitraryordergen()
2957 # try to use our own fast iterator if it exists
2957 # try to use our own fast iterator if it exists
2958 self._trysetasclist()
2958 self._trysetasclist()
2959 if self._ascending:
2959 if self._ascending:
2960 attr = 'fastasc'
2960 attr = 'fastasc'
2961 else:
2961 else:
2962 attr = 'fastdesc'
2962 attr = 'fastdesc'
2963 it = getattr(self, attr)
2963 it = getattr(self, attr)
2964 if it is not None:
2964 if it is not None:
2965 return it()
2965 return it()
2966 # maybe half of the component supports fast
2966 # maybe half of the component supports fast
2967 # get iterator for _r1
2967 # get iterator for _r1
2968 iter1 = getattr(self._r1, attr)
2968 iter1 = getattr(self._r1, attr)
2969 if iter1 is None:
2969 if iter1 is None:
2970 # let's avoid side effect (not sure it matters)
2970 # let's avoid side effect (not sure it matters)
2971 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
2971 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
2972 else:
2972 else:
2973 iter1 = iter1()
2973 iter1 = iter1()
2974 # get iterator for _r2
2974 # get iterator for _r2
2975 iter2 = getattr(self._r2, attr)
2975 iter2 = getattr(self._r2, attr)
2976 if iter2 is None:
2976 if iter2 is None:
2977 # let's avoid side effect (not sure it matters)
2977 # let's avoid side effect (not sure it matters)
2978 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
2978 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
2979 else:
2979 else:
2980 iter2 = iter2()
2980 iter2 = iter2()
2981 return _iterordered(self._ascending, iter1, iter2)
2981 return _iterordered(self._ascending, iter1, iter2)
2982
2982
2983 def _trysetasclist(self):
2983 def _trysetasclist(self):
2984 """populate the _asclist attribute if possible and necessary"""
2984 """populate the _asclist attribute if possible and necessary"""
2985 if self._genlist is not None and self._asclist is None:
2985 if self._genlist is not None and self._asclist is None:
2986 self._asclist = sorted(self._genlist)
2986 self._asclist = sorted(self._genlist)
2987
2987
2988 @property
2988 @property
2989 def fastasc(self):
2989 def fastasc(self):
2990 self._trysetasclist()
2990 self._trysetasclist()
2991 if self._asclist is not None:
2991 if self._asclist is not None:
2992 return self._asclist.__iter__
2992 return self._asclist.__iter__
2993 iter1 = self._r1.fastasc
2993 iter1 = self._r1.fastasc
2994 iter2 = self._r2.fastasc
2994 iter2 = self._r2.fastasc
2995 if None in (iter1, iter2):
2995 if None in (iter1, iter2):
2996 return None
2996 return None
2997 return lambda: _iterordered(True, iter1(), iter2())
2997 return lambda: _iterordered(True, iter1(), iter2())
2998
2998
2999 @property
2999 @property
3000 def fastdesc(self):
3000 def fastdesc(self):
3001 self._trysetasclist()
3001 self._trysetasclist()
3002 if self._asclist is not None:
3002 if self._asclist is not None:
3003 return self._asclist.__reversed__
3003 return self._asclist.__reversed__
3004 iter1 = self._r1.fastdesc
3004 iter1 = self._r1.fastdesc
3005 iter2 = self._r2.fastdesc
3005 iter2 = self._r2.fastdesc
3006 if None in (iter1, iter2):
3006 if None in (iter1, iter2):
3007 return None
3007 return None
3008 return lambda: _iterordered(False, iter1(), iter2())
3008 return lambda: _iterordered(False, iter1(), iter2())
3009
3009
3010 def __contains__(self, x):
3010 def __contains__(self, x):
3011 return x in self._r1 or x in self._r2
3011 return x in self._r1 or x in self._r2
3012
3012
3013 def sort(self, reverse=False):
3013 def sort(self, reverse=False):
3014 """Sort the added set
3014 """Sort the added set
3015
3015
3016 For this we use the cached list with all the generated values, and if we
3016 For this we use the cached list with all the generated values, and if we
3017 know they are ascending or descending we can sort them in a smart way.
3017 know they are ascending or descending we can sort them in a smart way.
3018 """
3018 """
3019 self._ascending = not reverse
3019 self._ascending = not reverse
3020
3020
3021 def isascending(self):
3021 def isascending(self):
3022 return self._ascending is not None and self._ascending
3022 return self._ascending is not None and self._ascending
3023
3023
3024 def isdescending(self):
3024 def isdescending(self):
3025 return self._ascending is not None and not self._ascending
3025 return self._ascending is not None and not self._ascending
3026
3026
3027 def reverse(self):
3027 def reverse(self):
3028 if self._ascending is None:
3028 if self._ascending is None:
3029 self._list.reverse()
3029 self._list.reverse()
3030 else:
3030 else:
3031 self._ascending = not self._ascending
3031 self._ascending = not self._ascending
3032
3032
3033 def first(self):
3033 def first(self):
3034 for x in self:
3034 for x in self:
3035 return x
3035 return x
3036 return None
3036 return None
3037
3037
3038 def last(self):
3038 def last(self):
3039 self.reverse()
3039 self.reverse()
3040 val = self.first()
3040 val = self.first()
3041 self.reverse()
3041 self.reverse()
3042 return val
3042 return val
3043
3043
3044 def __repr__(self):
3044 def __repr__(self):
3045 d = {None: '', False: '-', True: '+'}[self._ascending]
3045 d = {None: '', False: '-', True: '+'}[self._ascending]
3046 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3046 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3047
3047
3048 class generatorset(abstractsmartset):
3048 class generatorset(abstractsmartset):
3049 """Wrap a generator for lazy iteration
3049 """Wrap a generator for lazy iteration
3050
3050
3051 Wrapper structure for generators that provides lazy membership and can
3051 Wrapper structure for generators that provides lazy membership and can
3052 be iterated more than once.
3052 be iterated more than once.
3053 When asked for membership it generates values until either it finds the
3053 When asked for membership it generates values until either it finds the
3054 requested one or has gone through all the elements in the generator.
3054 requested one or has gone through all the elements in the generator.
3055 """
3055 """
3056 def __init__(self, gen, iterasc=None):
3056 def __init__(self, gen, iterasc=None):
3057 """
3057 """
3058 gen: a generator producing the values for the generatorset.
3058 gen: a generator producing the values for the generatorset.
3059 """
3059 """
3060 self._gen = gen
3060 self._gen = gen
3061 self._asclist = None
3061 self._asclist = None
3062 self._cache = {}
3062 self._cache = {}
3063 self._genlist = []
3063 self._genlist = []
3064 self._finished = False
3064 self._finished = False
3065 self._ascending = True
3065 self._ascending = True
3066 if iterasc is not None:
3066 if iterasc is not None:
3067 if iterasc:
3067 if iterasc:
3068 self.fastasc = self._iterator
3068 self.fastasc = self._iterator
3069 self.__contains__ = self._asccontains
3069 self.__contains__ = self._asccontains
3070 else:
3070 else:
3071 self.fastdesc = self._iterator
3071 self.fastdesc = self._iterator
3072 self.__contains__ = self._desccontains
3072 self.__contains__ = self._desccontains
3073
3073
3074 def __nonzero__(self):
3074 def __nonzero__(self):
3075 # Do not use 'for r in self' because it will enforce the iteration
3075 # Do not use 'for r in self' because it will enforce the iteration
3076 # order (default ascending), possibly unrolling a whole descending
3076 # order (default ascending), possibly unrolling a whole descending
3077 # iterator.
3077 # iterator.
3078 if self._genlist:
3078 if self._genlist:
3079 return True
3079 return True
3080 for r in self._consumegen():
3080 for r in self._consumegen():
3081 return True
3081 return True
3082 return False
3082 return False
3083
3083
3084 def __contains__(self, x):
3084 def __contains__(self, x):
3085 if x in self._cache:
3085 if x in self._cache:
3086 return self._cache[x]
3086 return self._cache[x]
3087
3087
3088 # Use new values only, as existing values would be cached.
3088 # Use new values only, as existing values would be cached.
3089 for l in self._consumegen():
3089 for l in self._consumegen():
3090 if l == x:
3090 if l == x:
3091 return True
3091 return True
3092
3092
3093 self._cache[x] = False
3093 self._cache[x] = False
3094 return False
3094 return False
3095
3095
3096 def _asccontains(self, x):
3096 def _asccontains(self, x):
3097 """version of contains optimised for ascending generator"""
3097 """version of contains optimised for ascending generator"""
3098 if x in self._cache:
3098 if x in self._cache:
3099 return self._cache[x]
3099 return self._cache[x]
3100
3100
3101 # Use new values only, as existing values would be cached.
3101 # Use new values only, as existing values would be cached.
3102 for l in self._consumegen():
3102 for l in self._consumegen():
3103 if l == x:
3103 if l == x:
3104 return True
3104 return True
3105 if l > x:
3105 if l > x:
3106 break
3106 break
3107
3107
3108 self._cache[x] = False
3108 self._cache[x] = False
3109 return False
3109 return False
3110
3110
3111 def _desccontains(self, x):
3111 def _desccontains(self, x):
3112 """version of contains optimised for descending generator"""
3112 """version of contains optimised for descending generator"""
3113 if x in self._cache:
3113 if x in self._cache:
3114 return self._cache[x]
3114 return self._cache[x]
3115
3115
3116 # Use new values only, as existing values would be cached.
3116 # Use new values only, as existing values would be cached.
3117 for l in self._consumegen():
3117 for l in self._consumegen():
3118 if l == x:
3118 if l == x:
3119 return True
3119 return True
3120 if l < x:
3120 if l < x:
3121 break
3121 break
3122
3122
3123 self._cache[x] = False
3123 self._cache[x] = False
3124 return False
3124 return False
3125
3125
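The two specialised membership tests above let an ordered generator answer "absent" without being fully drained. A minimal sketch of the ascending case (asccontains is a hypothetical standalone helper, not part of the module):

    def asccontains(gen, x):
        # an ascending stream can give up as soon as a value exceeds the probe
        for l in gen:
            if l == x:
                return True
            if l > x:
                break
        return False

    assert asccontains(iter([1, 4, 9, 16]), 4) is True
    assert asccontains(iter([1, 4, 9, 16]), 5) is False  # stops at 9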
3126 def __iter__(self):
3126 def __iter__(self):
3127 if self._ascending:
3127 if self._ascending:
3128 it = self.fastasc
3128 it = self.fastasc
3129 else:
3129 else:
3130 it = self.fastdesc
3130 it = self.fastdesc
3131 if it is not None:
3131 if it is not None:
3132 return it()
3132 return it()
3133 # we need to consume the iterator
3133 # we need to consume the iterator
3134 for x in self._consumegen():
3134 for x in self._consumegen():
3135 pass
3135 pass
3136 # recall the same code
3136 # recall the same code
3137 return iter(self)
3137 return iter(self)
3138
3138
3139 def _iterator(self):
3139 def _iterator(self):
3140 if self._finished:
3140 if self._finished:
3141 return iter(self._genlist)
3141 return iter(self._genlist)
3142
3142
3143 # We have to use this complex iteration strategy to allow multiple
3143 # We have to use this complex iteration strategy to allow multiple
3144 # iterations at the same time. We need to be able to catch revisions
3144 # iterations at the same time. We need to be able to catch revisions
3145 # removed from _consumegen and added to genlist in another instance.
3145 # removed from _consumegen and added to genlist in another instance.
3146 #
3146 #
3147 # Getting rid of it would provide about a 15% speedup on this
3147 # Getting rid of it would provide about a 15% speedup on this
3148 # iteration.
3148 # iteration.
3149 genlist = self._genlist
3149 genlist = self._genlist
3150 nextrev = self._consumegen().next
3150 nextrev = self._consumegen().next
3151 _len = len # cache global lookup
3151 _len = len # cache global lookup
3152 def gen():
3152 def gen():
3153 i = 0
3153 i = 0
3154 while True:
3154 while True:
3155 if i < _len(genlist):
3155 if i < _len(genlist):
3156 yield genlist[i]
3156 yield genlist[i]
3157 else:
3157 else:
3158 yield nextrev()
3158 yield nextrev()
3159 i += 1
3159 i += 1
3160 return gen()
3160 return gen()
3161
3161
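Note that nextrev above binds the Python-2-only .next method of the shared generator; the portable spelling would go through the next() builtin, as the rest of this changeset does. The replay strategy itself can be sketched in isolation (toy class, assuming single-threaded use):

    class replayable(object):
        # toy model of _iterator: each iterator replays the shared cache
        # first, then pulls fresh values from the single generator
        def __init__(self, gen):
            self._gen = gen
            self._cache = []
        def __iter__(self):
            i = 0
            while True:
                if i < len(self._cache):
                    yield self._cache[i]
                else:
                    try:
                        val = next(self._gen)  # portable, unlike .next()
                    except StopIteration:
                        return
                    self._cache.append(val)
                    yield val
                i += 1

    r = replayable(iter('abc'))
    it1, it2 = iter(r), iter(r)
    assert next(it1) == 'a'   # consumed from the generator
    assert next(it2) == 'a'   # replayed from the cache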
3162 def _consumegen(self):
3162 def _consumegen(self):
3163 cache = self._cache
3163 cache = self._cache
3164 genlist = self._genlist.append
3164 genlist = self._genlist.append
3165 for item in self._gen:
3165 for item in self._gen:
3166 cache[item] = True
3166 cache[item] = True
3167 genlist(item)
3167 genlist(item)
3168 yield item
3168 yield item
3169 if not self._finished:
3169 if not self._finished:
3170 self._finished = True
3170 self._finished = True
3171 asc = self._genlist[:]
3171 asc = self._genlist[:]
3172 asc.sort()
3172 asc.sort()
3173 self._asclist = asc
3173 self._asclist = asc
3174 self.fastasc = asc.__iter__
3174 self.fastasc = asc.__iter__
3175 self.fastdesc = asc.__reversed__
3175 self.fastdesc = asc.__reversed__
3176
3176
3177 def __len__(self):
3177 def __len__(self):
3178 for x in self._consumegen():
3178 for x in self._consumegen():
3179 pass
3179 pass
3180 return len(self._genlist)
3180 return len(self._genlist)
3181
3181
3182 def sort(self, reverse=False):
3182 def sort(self, reverse=False):
3183 self._ascending = not reverse
3183 self._ascending = not reverse
3184
3184
3185 def reverse(self):
3185 def reverse(self):
3186 self._ascending = not self._ascending
3186 self._ascending = not self._ascending
3187
3187
3188 def isascending(self):
3188 def isascending(self):
3189 return self._ascending
3189 return self._ascending
3190
3190
3191 def isdescending(self):
3191 def isdescending(self):
3192 return not self._ascending
3192 return not self._ascending
3193
3193
3194 def first(self):
3194 def first(self):
3195 if self._ascending:
3195 if self._ascending:
3196 it = self.fastasc
3196 it = self.fastasc
3197 else:
3197 else:
3198 it = self.fastdesc
3198 it = self.fastdesc
3199 if it is None:
3199 if it is None:
3200 # we need to consume all and try again
3200 # we need to consume all and try again
3201 for x in self._consumegen():
3201 for x in self._consumegen():
3202 pass
3202 pass
3203 return self.first()
3203 return self.first()
3204 return next(it(), None)
3204 return next(it(), None)
3205
3205
3206 def last(self):
3206 def last(self):
3207 if self._ascending:
3207 if self._ascending:
3208 it = self.fastdesc
3208 it = self.fastdesc
3209 else:
3209 else:
3210 it = self.fastasc
3210 it = self.fastasc
3211 if it is None:
3211 if it is None:
3212 # we need to consume all and try again
3212 # we need to consume all and try again
3213 for x in self._consumegen():
3213 for x in self._consumegen():
3214 pass
3214 pass
3215 return self.first()
3215 return self.first()
3216 return next(it(), None)
3216 return next(it(), None)
3217
3217
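first() and last() above use the two-argument form of the next() builtin, the function this patch standardises on. A quick refresher (plain Python, nothing Mercurial-specific):

    it = iter([10, 20])
    assert next(it) == 10          # same as it.next() on Python 2 only
    assert next(it, None) == 20
    assert next(it, None) is None  # exhausted: returns the default rather
                                   # than raising StopIteration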
3218 def __repr__(self):
3218 def __repr__(self):
3219 d = {False: '-', True: '+'}[self._ascending]
3219 d = {False: '-', True: '+'}[self._ascending]
3220 return '<%s%s>' % (type(self).__name__, d)
3220 return '<%s%s>' % (type(self).__name__, d)
3221
3221
3222 class spanset(abstractsmartset):
3222 class spanset(abstractsmartset):
3223 """Duck type for baseset class which represents a range of revisions and
3223 """Duck type for baseset class which represents a range of revisions and
3224 can work lazily and without having all the range in memory
3224 can work lazily and without having all the range in memory
3225
3225
3226 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3226 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3227 notable points:
3227 notable points:
3228 - when x > y it will be automatically descending,
3228 - when x > y it will be automatically descending,
3229 - revisions filtered with this repoview will be skipped.
3229 - revisions filtered with this repoview will be skipped.
3230
3230
3231 """
3231 """
3232 def __init__(self, repo, start=0, end=None):
3232 def __init__(self, repo, start=0, end=None):
3233 """
3233 """
3234 start: first revision included in the set
3234 start: first revision included in the set
3235 (defaults to 0)
3235 (defaults to 0)
3236 end: first revision excluded (last+1)
3236 end: first revision excluded (last+1)
3237 (defaults to len(repo))
3237 (defaults to len(repo))
3238
3238
3239 Spanset will be descending if `end` < `start`.
3239 Spanset will be descending if `end` < `start`.
3240 """
3240 """
3241 if end is None:
3241 if end is None:
3242 end = len(repo)
3242 end = len(repo)
3243 self._ascending = start <= end
3243 self._ascending = start <= end
3244 if not self._ascending:
3244 if not self._ascending:
3245 start, end = end + 1, start + 1
3245 start, end = end + 1, start + 1
3246 self._start = start
3246 self._start = start
3247 self._end = end
3247 self._end = end
3248 self._hiddenrevs = repo.changelog.filteredrevs
3248 self._hiddenrevs = repo.changelog.filteredrevs
3249
3249
3250 def sort(self, reverse=False):
3250 def sort(self, reverse=False):
3251 self._ascending = not reverse
3251 self._ascending = not reverse
3252
3252
3253 def reverse(self):
3253 def reverse(self):
3254 self._ascending = not self._ascending
3254 self._ascending = not self._ascending
3255
3255
3256 def _iterfilter(self, iterrange):
3256 def _iterfilter(self, iterrange):
3257 s = self._hiddenrevs
3257 s = self._hiddenrevs
3258 for r in iterrange:
3258 for r in iterrange:
3259 if r not in s:
3259 if r not in s:
3260 yield r
3260 yield r
3261
3261
3262 def __iter__(self):
3262 def __iter__(self):
3263 if self._ascending:
3263 if self._ascending:
3264 return self.fastasc()
3264 return self.fastasc()
3265 else:
3265 else:
3266 return self.fastdesc()
3266 return self.fastdesc()
3267
3267
3268 def fastasc(self):
3268 def fastasc(self):
3269 iterrange = xrange(self._start, self._end)
3269 iterrange = xrange(self._start, self._end)
3270 if self._hiddenrevs:
3270 if self._hiddenrevs:
3271 return self._iterfilter(iterrange)
3271 return self._iterfilter(iterrange)
3272 return iter(iterrange)
3272 return iter(iterrange)
3273
3273
3274 def fastdesc(self):
3274 def fastdesc(self):
3275 iterrange = xrange(self._end - 1, self._start - 1, -1)
3275 iterrange = xrange(self._end - 1, self._start - 1, -1)
3276 if self._hiddenrevs:
3276 if self._hiddenrevs:
3277 return self._iterfilter(iterrange)
3277 return self._iterfilter(iterrange)
3278 return iter(iterrange)
3278 return iter(iterrange)
3279
3279
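The ordering rule from the class docstring can be illustrated without a repository (spaniter is a hypothetical stand-in; a real spanset additionally skips hidden revisions):

    def spaniter(start, end):
        # mirrors spanset.__init__ normalisation plus fastasc/fastdesc
        if start <= end:
            return iter(range(start, end))
        start, end = end + 1, start + 1
        return iter(range(end - 1, start - 1, -1))

    assert list(spaniter(2, 6)) == [2, 3, 4, 5]
    assert list(spaniter(6, 2)) == [6, 5, 4, 3]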
3280 def __contains__(self, rev):
3280 def __contains__(self, rev):
3281 hidden = self._hiddenrevs
3281 hidden = self._hiddenrevs
3282 return ((self._start <= rev < self._end)
3282 return ((self._start <= rev < self._end)
3283 and not (hidden and rev in hidden))
3283 and not (hidden and rev in hidden))
3284
3284
3285 def __nonzero__(self):
3285 def __nonzero__(self):
3286 for r in self:
3286 for r in self:
3287 return True
3287 return True
3288 return False
3288 return False
3289
3289
3290 def __len__(self):
3290 def __len__(self):
3291 if not self._hiddenrevs:
3291 if not self._hiddenrevs:
3292 return abs(self._end - self._start)
3292 return abs(self._end - self._start)
3293 else:
3293 else:
3294 count = 0
3294 count = 0
3295 start = self._start
3295 start = self._start
3296 end = self._end
3296 end = self._end
3297 for rev in self._hiddenrevs:
3297 for rev in self._hiddenrevs:
3298 if (end < rev <= start) or (start <= rev < end):
3298 if (end < rev <= start) or (start <= rev < end):
3299 count += 1
3299 count += 1
3300 return abs(self._end - self._start) - count
3300 return abs(self._end - self._start) - count
3301
3301
3302 def isascending(self):
3302 def isascending(self):
3303 return self._ascending
3303 return self._ascending
3304
3304
3305 def isdescending(self):
3305 def isdescending(self):
3306 return not self._ascending
3306 return not self._ascending
3307
3307
3308 def first(self):
3308 def first(self):
3309 if self._ascending:
3309 if self._ascending:
3310 it = self.fastasc
3310 it = self.fastasc
3311 else:
3311 else:
3312 it = self.fastdesc
3312 it = self.fastdesc
3313 for x in it():
3313 for x in it():
3314 return x
3314 return x
3315 return None
3315 return None
3316
3316
3317 def last(self):
3317 def last(self):
3318 if self._ascending:
3318 if self._ascending:
3319 it = self.fastdesc
3319 it = self.fastdesc
3320 else:
3320 else:
3321 it = self.fastasc
3321 it = self.fastasc
3322 for x in it():
3322 for x in it():
3323 return x
3323 return x
3324 return None
3324 return None
3325
3325
3326 def __repr__(self):
3326 def __repr__(self):
3327 d = {False: '-', True: '+'}[self._ascending]
3327 d = {False: '-', True: '+'}[self._ascending]
3328 return '<%s%s %d:%d>' % (type(self).__name__, d,
3328 return '<%s%s %d:%d>' % (type(self).__name__, d,
3329 self._start, self._end - 1)
3329 self._start, self._end - 1)
3330
3330
3331 class fullreposet(spanset):
3331 class fullreposet(spanset):
3332 """a set containing all revisions in the repo
3332 """a set containing all revisions in the repo
3333
3333
3334 This class exists to host special optimization and magic to handle virtual
3334 This class exists to host special optimization and magic to handle virtual
3335 revisions such as "null".
3335 revisions such as "null".
3336 """
3336 """
3337
3337
3338 def __init__(self, repo):
3338 def __init__(self, repo):
3339 super(fullreposet, self).__init__(repo)
3339 super(fullreposet, self).__init__(repo)
3340
3340
3341 def __and__(self, other):
3341 def __and__(self, other):
3342 """As self contains the whole repo, all of the other set should also be
3342 """As self contains the whole repo, all of the other set should also be
3343 in self. Therefore `self & other = other`.
3343 in self. Therefore `self & other = other`.
3344
3344
3345 This boldly assumes the other contains valid revs only.
3345 This boldly assumes the other contains valid revs only.
3346 """
3346 """
3347 # other is not a smartset, make it so
3347 # other is not a smartset, make it so
3348 if not util.safehasattr(other, 'isascending'):
3348 if not util.safehasattr(other, 'isascending'):
3349 # filter out hidden revisions
3349 # filter out hidden revisions
3350 # (this boldly assumes all smartsets are pure)
3350 # (this boldly assumes all smartsets are pure)
3351 #
3351 #
3352 # `other` was used with "&", let's assume this is a set-like
3352 # `other` was used with "&", let's assume this is a set-like
3353 # object.
3353 # object.
3354 other = baseset(other - self._hiddenrevs)
3354 other = baseset(other - self._hiddenrevs)
3355
3355
3356 # XXX As fullreposet is also used as bootstrap, this is wrong.
3356 # XXX As fullreposet is also used as bootstrap, this is wrong.
3357 #
3357 #
3358 # With a giveme312() revset returning [3,1,2], this makes
3358 # With a giveme312() revset returning [3,1,2], this makes
3359 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3359 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3360 # We cannot just drop it because other usages still need to sort it:
3360 # We cannot just drop it because other usages still need to sort it:
3361 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3361 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3362 #
3362 #
3363 # There are also some faulty revset implementations that rely on it
3363 # There are also some faulty revset implementations that rely on it
3364 # (eg: children as of its state in e8075329c5fb)
3364 # (eg: children as of its state in e8075329c5fb)
3365 #
3365 #
3366 # When we fix the two points above we can move this into the if clause
3366 # When we fix the two points above we can move this into the if clause
3367 other.sort(reverse=self.isdescending())
3367 other.sort(reverse=self.isdescending())
3368 return other
3368 return other
3369
3369
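The identity the docstring leans on is easy to check with plain sets standing in for smartsets (illustrative only; real smartsets also carry ordering, which is why the sort() call above has to stay for now):

    full = set(range(10))
    other = [3, 1, 2]
    assert full & set(other) == set(other)  # self & other == other
    assert sorted(other) == [1, 2, 3]       # the reordering the XXX comment
                                            # warns about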
3370 def prettyformatset(revs):
3370 def prettyformatset(revs):
3371 lines = []
3371 lines = []
3372 rs = repr(revs)
3372 rs = repr(revs)
3373 p = 0
3373 p = 0
3374 while p < len(rs):
3374 while p < len(rs):
3375 q = rs.find('<', p + 1)
3375 q = rs.find('<', p + 1)
3376 if q < 0:
3376 if q < 0:
3377 q = len(rs)
3377 q = len(rs)
3378 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3378 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3379 assert l >= 0
3379 assert l >= 0
3380 lines.append((l, rs[p:q].rstrip()))
3380 lines.append((l, rs[p:q].rstrip()))
3381 p = q
3381 p = q
3382 return '\n'.join(' ' * l + s for l, s in lines)
3382 return '\n'.join(' ' * l + s for l, s in lines)
3383
3383
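prettyformatset splits repr(revs) at every '<' and indents each fragment by its nesting depth, one space per unmatched '<'. With a stand-in object whose repr mimics a nested smartset (real smartset reprs may differ in detail):

    class fakeset(object):
        def __repr__(self):
            return '<addset <baseset [0, 3, 2]>, <baseset [5, 2, 4]>>'

    print(prettyformatset(fakeset()))
    # <addset
    #  <baseset [0, 3, 2]>,
    #  <baseset [5, 2, 4]>>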
3384 def loadpredicate(ui, extname, registrarobj):
3384 def loadpredicate(ui, extname, registrarobj):
3385 """Load revset predicates from specified registrarobj
3385 """Load revset predicates from specified registrarobj
3386 """
3386 """
3387 for name, func in registrarobj._table.iteritems():
3387 for name, func in registrarobj._table.iteritems():
3388 symbols[name] = func
3388 symbols[name] = func
3389 if func._safe:
3389 if func._safe:
3390 safesymbols.add(name)
3390 safesymbols.add(name)
3391
3391
3392 # load built-in predicates explicitly to setup safesymbols
3392 # load built-in predicates explicitly to setup safesymbols
3393 loadpredicate(None, None, predicate)
3393 loadpredicate(None, None, predicate)
3394
3394
3395 # tell hggettext to extract docstrings from these functions:
3395 # tell hggettext to extract docstrings from these functions:
3396 i18nfunctions = symbols.values()
3396 i18nfunctions = symbols.values()
@@ -1,571 +1,571 b''
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import array
15 import array
16 import errno
16 import errno
17 import time
17 import time
18
18
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 short,
23 short,
24 )
24 )
25 from . import (
25 from . import (
26 encoding,
26 encoding,
27 error,
27 error,
28 util,
28 util,
29 )
29 )
30
30
31 array = array.array
31 array = array.array
32
32
33 # Tags computation can be expensive and caches exist to make it fast in
33 # Tags computation can be expensive and caches exist to make it fast in
34 # the common case.
34 # the common case.
35 #
35 #
36 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
36 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
37 # each revision in the repository. The file is effectively an array of
37 # each revision in the repository. The file is effectively an array of
38 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
38 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
39 # details.
39 # details.
40 #
40 #
41 # The .hgtags filenode cache grows in proportion to the length of the
41 # The .hgtags filenode cache grows in proportion to the length of the
42 # changelog. The file is truncated when the changelog is stripped.
42 # changelog. The file is truncated when the changelog is stripped.
43 #
43 #
44 # The purpose of the filenode cache is to avoid the most expensive part
44 # The purpose of the filenode cache is to avoid the most expensive part
45 # of finding global tags, which is looking up the .hgtags filenode in the
45 # of finding global tags, which is looking up the .hgtags filenode in the
46 # manifest for each head. This can take dozens of milliseconds, or over
46 # manifest for each head. This can take dozens of milliseconds, or over
47 # 100ms for repositories with very large manifests. Multiply that by dozens
47 # 100ms for repositories with very large manifests. Multiply that by dozens
48 # or even hundreds of heads and there is a significant performance concern.
48 # or even hundreds of heads and there is a significant performance concern.
49 #
49 #
50 # There also exists a separate cache file for each repository filter.
50 # There also exists a separate cache file for each repository filter.
51 # These "tags-*" files store information about the history of tags.
51 # These "tags-*" files store information about the history of tags.
52 #
52 #
53 # The tags cache files consist of a cache validation line followed by
53 # The tags cache files consist of a cache validation line followed by
54 # a history of tags.
54 # a history of tags.
55 #
55 #
56 # The cache validation line has the format:
56 # The cache validation line has the format:
57 #
57 #
58 # <tiprev> <tipnode> [<filteredhash>]
58 # <tiprev> <tipnode> [<filteredhash>]
59 #
59 #
60 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
60 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
61 # node for that changeset. These redundantly identify the repository
61 # node for that changeset. These redundantly identify the repository
62 # tip from the time the cache was written. In addition, <filteredhash>,
62 # tip from the time the cache was written. In addition, <filteredhash>,
63 # if present, is a 40 character hex hash of the contents of the filtered
63 # if present, is a 40 character hex hash of the contents of the filtered
64 # revisions for this filter. If the set of filtered revs changes, the
64 # revisions for this filter. If the set of filtered revs changes, the
65 # hash will change and invalidate the cache.
65 # hash will change and invalidate the cache.
66 #
66 #
67 # The history part of the tags cache consists of lines of the form:
67 # The history part of the tags cache consists of lines of the form:
68 #
68 #
69 # <node> <tag>
69 # <node> <tag>
70 #
70 #
71 # (This format is identical to that of .hgtags files.)
71 # (This format is identical to that of .hgtags files.)
72 #
72 #
73 # <tag> is the tag name and <node> is the 40 character hex changeset
73 # <tag> is the tag name and <node> is the 40 character hex changeset
74 # the tag is associated with.
74 # the tag is associated with.
75 #
75 #
76 # Tags are written sorted by tag name.
76 # Tags are written sorted by tag name.
77 #
77 #
78 # Tags associated with multiple changesets have an entry for each changeset.
78 # Tags associated with multiple changesets have an entry for each changeset.
79 # The most recent changeset (in terms of revlog ordering for the head
79 # The most recent changeset (in terms of revlog ordering for the head
80 # setting it) for each tag is last.
80 # setting it) for each tag is last.
81
81
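A sketch of reading the validation line described above (parsevalidline is a hypothetical helper; the real parsing happens inline in _readtagcache below, using bin() from the .node imports):

    def parsevalidline(line):
        # "<tiprev> <tipnode> [<filteredhash>]"
        fields = line.split()
        tiprev = int(fields[0])
        tipnode = bin(fields[1])  # 40-char hex -> 20 binary bytes
        filteredhash = None
        if len(fields) > 2:
            filteredhash = bin(fields[2])
        return tiprev, tipnode, filteredhash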
82 def findglobaltags(ui, repo, alltags, tagtypes):
82 def findglobaltags(ui, repo, alltags, tagtypes):
83 '''Find global tags in a repo.
83 '''Find global tags in a repo.
84
84
85 "alltags" maps tag name to (node, hist) 2-tuples.
85 "alltags" maps tag name to (node, hist) 2-tuples.
86
86
87 "tagtypes" maps tag name to tag type. Global tags always have the
87 "tagtypes" maps tag name to tag type. Global tags always have the
88 "global" tag type.
88 "global" tag type.
89
89
90 The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
90 The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
91 should be passed in.
91 should be passed in.
92
92
93 The tags cache is read and updated as a side-effect of calling.
93 The tags cache is read and updated as a side-effect of calling.
94 '''
94 '''
95 # This is so we can be lazy and assume alltags contains only global
95 # This is so we can be lazy and assume alltags contains only global
96 # tags when we pass it to _writetagcache().
96 # tags when we pass it to _writetagcache().
97 assert len(alltags) == len(tagtypes) == 0, \
97 assert len(alltags) == len(tagtypes) == 0, \
98 "findglobaltags() should be called first"
98 "findglobaltags() should be called first"
99
99
100 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
100 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
101 if cachetags is not None:
101 if cachetags is not None:
102 assert not shouldwrite
102 assert not shouldwrite
103 # XXX is this really 100% correct? are there oddball special
103 # XXX is this really 100% correct? are there oddball special
104 # cases where a global tag should outrank a local tag but won't,
104 # cases where a global tag should outrank a local tag but won't,
105 # because cachetags does not contain rank info?
105 # because cachetags does not contain rank info?
106 _updatetags(cachetags, 'global', alltags, tagtypes)
106 _updatetags(cachetags, 'global', alltags, tagtypes)
107 return
107 return
108
108
109 seen = set() # set of fnode
109 seen = set() # set of fnode
110 fctx = None
110 fctx = None
111 for head in reversed(heads): # oldest to newest
111 for head in reversed(heads): # oldest to newest
112 assert head in repo.changelog.nodemap, \
112 assert head in repo.changelog.nodemap, \
113 "tag cache returned bogus head %s" % short(head)
113 "tag cache returned bogus head %s" % short(head)
114
114
115 fnode = tagfnode.get(head)
115 fnode = tagfnode.get(head)
116 if fnode and fnode not in seen:
116 if fnode and fnode not in seen:
117 seen.add(fnode)
117 seen.add(fnode)
118 if not fctx:
118 if not fctx:
119 fctx = repo.filectx('.hgtags', fileid=fnode)
119 fctx = repo.filectx('.hgtags', fileid=fnode)
120 else:
120 else:
121 fctx = fctx.filectx(fnode)
121 fctx = fctx.filectx(fnode)
122
122
123 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
123 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
124 _updatetags(filetags, 'global', alltags, tagtypes)
124 _updatetags(filetags, 'global', alltags, tagtypes)
125
125
126 # and update the cache (if necessary)
126 # and update the cache (if necessary)
127 if shouldwrite:
127 if shouldwrite:
128 _writetagcache(ui, repo, valid, alltags)
128 _writetagcache(ui, repo, valid, alltags)
129
129
130 def readlocaltags(ui, repo, alltags, tagtypes):
130 def readlocaltags(ui, repo, alltags, tagtypes):
131 '''Read local tags in repo. Update alltags and tagtypes.'''
131 '''Read local tags in repo. Update alltags and tagtypes.'''
132 try:
132 try:
133 data = repo.vfs.read("localtags")
133 data = repo.vfs.read("localtags")
134 except IOError as inst:
134 except IOError as inst:
135 if inst.errno != errno.ENOENT:
135 if inst.errno != errno.ENOENT:
136 raise
136 raise
137 return
137 return
138
138
139 # localtags is in the local encoding; re-encode to UTF-8 on
139 # localtags is in the local encoding; re-encode to UTF-8 on
140 # input for consistency with the rest of this module.
140 # input for consistency with the rest of this module.
141 filetags = _readtags(
141 filetags = _readtags(
142 ui, repo, data.splitlines(), "localtags",
142 ui, repo, data.splitlines(), "localtags",
143 recode=encoding.fromlocal)
143 recode=encoding.fromlocal)
144
144
145 # remove tags pointing to invalid nodes
145 # remove tags pointing to invalid nodes
146 cl = repo.changelog
146 cl = repo.changelog
147 for t in filetags.keys():
147 for t in filetags.keys():
148 try:
148 try:
149 cl.rev(filetags[t][0])
149 cl.rev(filetags[t][0])
150 except (LookupError, ValueError):
150 except (LookupError, ValueError):
151 del filetags[t]
151 del filetags[t]
152
152
153 _updatetags(filetags, "local", alltags, tagtypes)
153 _updatetags(filetags, "local", alltags, tagtypes)
154
154
155 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
155 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
156 '''Read tag definitions from a file (or any source of lines).
156 '''Read tag definitions from a file (or any source of lines).
157
157
158 This function returns two sortdicts with similar information:
158 This function returns two sortdicts with similar information:
159
159
160 - the first dict, bintaghist, contains the tag information as expected by
160 - the first dict, bintaghist, contains the tag information as expected by
161 the _readtags function, i.e. a mapping from tag name to (node, hist):
161 the _readtags function, i.e. a mapping from tag name to (node, hist):
162 - node is the node id from the last line read for that name,
162 - node is the node id from the last line read for that name,
163 - hist is the list of node ids previously associated with it (in file
163 - hist is the list of node ids previously associated with it (in file
164 order). All node ids are binary, not hex.
164 order). All node ids are binary, not hex.
165
165
166 - the second dict, hextaglines, is a mapping from tag name to a list of
166 - the second dict, hextaglines, is a mapping from tag name to a list of
167 [hexnode, line number] pairs, ordered from the oldest to the newest node.
167 [hexnode, line number] pairs, ordered from the oldest to the newest node.
168
168
169 When calcnodelines is False the hextaglines dict is not calculated (an
169 When calcnodelines is False the hextaglines dict is not calculated (an
170 empty dict is returned). This is done to improve this function's
170 empty dict is returned). This is done to improve this function's
171 performance in cases where the line numbers are not needed.
171 performance in cases where the line numbers are not needed.
172 '''
172 '''
173
173
174 bintaghist = util.sortdict()
174 bintaghist = util.sortdict()
175 hextaglines = util.sortdict()
175 hextaglines = util.sortdict()
176 count = 0
176 count = 0
177
177
178 def dbg(msg):
178 def dbg(msg):
179 ui.debug("%s, line %s: %s\n" % (fn, count, msg))
179 ui.debug("%s, line %s: %s\n" % (fn, count, msg))
180
180
181 for nline, line in enumerate(lines):
181 for nline, line in enumerate(lines):
182 count += 1
182 count += 1
183 if not line:
183 if not line:
184 continue
184 continue
185 try:
185 try:
186 (nodehex, name) = line.split(" ", 1)
186 (nodehex, name) = line.split(" ", 1)
187 except ValueError:
187 except ValueError:
188 dbg("cannot parse entry")
188 dbg("cannot parse entry")
189 continue
189 continue
190 name = name.strip()
190 name = name.strip()
191 if recode:
191 if recode:
192 name = recode(name)
192 name = recode(name)
193 try:
193 try:
194 nodebin = bin(nodehex)
194 nodebin = bin(nodehex)
195 except TypeError:
195 except TypeError:
196 dbg("node '%s' is not well formed" % nodehex)
196 dbg("node '%s' is not well formed" % nodehex)
197 continue
197 continue
198
198
199 # update filetags
199 # update filetags
200 if calcnodelines:
200 if calcnodelines:
201 # map tag name to a list of line numbers
201 # map tag name to a list of line numbers
202 if name not in hextaglines:
202 if name not in hextaglines:
203 hextaglines[name] = []
203 hextaglines[name] = []
204 hextaglines[name].append([nodehex, nline])
204 hextaglines[name].append([nodehex, nline])
205 continue
205 continue
206 # map tag name to (node, hist)
206 # map tag name to (node, hist)
207 if name not in bintaghist:
207 if name not in bintaghist:
208 bintaghist[name] = []
208 bintaghist[name] = []
209 bintaghist[name].append(nodebin)
209 bintaghist[name].append(nodebin)
210 return bintaghist, hextaglines
210 return bintaghist, hextaglines
211
211
212 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
212 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
213 '''Read tag definitions from a file (or any source of lines).
213 '''Read tag definitions from a file (or any source of lines).
214
214
215 Returns a mapping from tag name to (node, hist).
215 Returns a mapping from tag name to (node, hist).
216
216
217 "node" is the node id from the last line read for that name. "hist"
217 "node" is the node id from the last line read for that name. "hist"
218 is the list of node ids previously associated with it (in file order).
218 is the list of node ids previously associated with it (in file order).
219 All node ids are binary, not hex.
219 All node ids are binary, not hex.
220 '''
220 '''
221 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
221 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
222 calcnodelines=calcnodelines)
222 calcnodelines=calcnodelines)
223 # util.sortdict().__setitem__ is much slower at replacing than inserting
223 # util.sortdict().__setitem__ is much slower at replacing than inserting
224 # new entries. The difference can matter if there are thousands of tags.
224 # new entries. The difference can matter if there are thousands of tags.
225 # Create a new sortdict to avoid the performance penalty.
225 # Create a new sortdict to avoid the performance penalty.
226 newtags = util.sortdict()
226 newtags = util.sortdict()
227 for tag, taghist in filetags.items():
227 for tag, taghist in filetags.items():
228 newtags[tag] = (taghist[-1], taghist[:-1])
228 newtags[tag] = (taghist[-1], taghist[:-1])
229 return newtags
229 return newtags
230
230
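The (node, hist) split above is "last writer wins": the final node read for a tag becomes its value, everything earlier becomes history. With toy values (not real binary node ids):

    taghist = ['n1', 'n2', 'n3']            # file order, oldest first
    node, hist = taghist[-1], taghist[:-1]
    assert (node, hist) == ('n3', ['n1', 'n2'])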
231 def _updatetags(filetags, tagtype, alltags, tagtypes):
231 def _updatetags(filetags, tagtype, alltags, tagtypes):
232 '''Incorporate the tag info read from one file into the two
232 '''Incorporate the tag info read from one file into the two
233 dictionaries, alltags and tagtypes, that contain all tag
233 dictionaries, alltags and tagtypes, that contain all tag
234 info (global across all heads plus local).'''
234 info (global across all heads plus local).'''
235
235
236 for name, nodehist in filetags.iteritems():
236 for name, nodehist in filetags.iteritems():
237 if name not in alltags:
237 if name not in alltags:
238 alltags[name] = nodehist
238 alltags[name] = nodehist
239 tagtypes[name] = tagtype
239 tagtypes[name] = tagtype
240 continue
240 continue
241
241
242 # we prefer alltags[name] if:
242 # we prefer alltags[name] if:
243 # it supersedes us OR
243 # it supersedes us OR
244 # mutual supersedes and it has a higher rank
244 # mutual supersedes and it has a higher rank
245 # otherwise we win because we're tip-most
245 # otherwise we win because we're tip-most
246 anode, ahist = nodehist
246 anode, ahist = nodehist
247 bnode, bhist = alltags[name]
247 bnode, bhist = alltags[name]
248 if (bnode != anode and anode in bhist and
248 if (bnode != anode and anode in bhist and
249 (bnode not in ahist or len(bhist) > len(ahist))):
249 (bnode not in ahist or len(bhist) > len(ahist))):
250 anode = bnode
250 anode = bnode
251 else:
251 else:
252 tagtypes[name] = tagtype
252 tagtypes[name] = tagtype
253 ahist.extend([n for n in bhist if n not in ahist])
253 ahist.extend([n for n in bhist if n not in ahist])
254 alltags[name] = anode, ahist
254 alltags[name] = anode, ahist
255
255
256 def _filename(repo):
256 def _filename(repo):
257 """name of a tagcache file for a given repo or repoview"""
257 """name of a tagcache file for a given repo or repoview"""
258 filename = 'cache/tags2'
258 filename = 'cache/tags2'
259 if repo.filtername:
259 if repo.filtername:
260 filename = '%s-%s' % (filename, repo.filtername)
260 filename = '%s-%s' % (filename, repo.filtername)
261 return filename
261 return filename
262
262
263 def _readtagcache(ui, repo):
263 def _readtagcache(ui, repo):
264 '''Read the tag cache.
264 '''Read the tag cache.
265
265
266 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
266 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
267
267
268 If the cache is completely up-to-date, "cachetags" is a dict of the
268 If the cache is completely up-to-date, "cachetags" is a dict of the
269 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
269 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
270 None and "shouldwrite" is False.
270 None and "shouldwrite" is False.
271
271
272 If the cache is not up to date, "cachetags" is None. "heads" is a list
272 If the cache is not up to date, "cachetags" is None. "heads" is a list
273 of all heads currently in the repository, ordered from tip to oldest.
273 of all heads currently in the repository, ordered from tip to oldest.
274 "validinfo" is a tuple describing cache validation info. This is used
274 "validinfo" is a tuple describing cache validation info. This is used
275 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
275 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
276 filenode. "shouldwrite" is True.
276 filenode. "shouldwrite" is True.
277
277
278 If the cache is not up to date, the caller is responsible for reading tag
278 If the cache is not up to date, the caller is responsible for reading tag
279 info from each returned head. (See findglobaltags().)
279 info from each returned head. (See findglobaltags().)
280 '''
280 '''
281 from . import scmutil # avoid cycle
281 from . import scmutil # avoid cycle
282
282
283 try:
283 try:
284 cachefile = repo.vfs(_filename(repo), 'r')
284 cachefile = repo.vfs(_filename(repo), 'r')
285 # force reading the file for static-http
285 # force reading the file for static-http
286 cachelines = iter(cachefile)
286 cachelines = iter(cachefile)
287 except IOError:
287 except IOError:
288 cachefile = None
288 cachefile = None
289
289
290 cacherev = None
290 cacherev = None
291 cachenode = None
291 cachenode = None
292 cachehash = None
292 cachehash = None
293 if cachefile:
293 if cachefile:
294 try:
294 try:
295 validline = cachelines.next()
295 validline = next(cachelines)
296 validline = validline.split()
296 validline = validline.split()
297 cacherev = int(validline[0])
297 cacherev = int(validline[0])
298 cachenode = bin(validline[1])
298 cachenode = bin(validline[1])
299 if len(validline) > 2:
299 if len(validline) > 2:
300 cachehash = bin(validline[2])
300 cachehash = bin(validline[2])
301 except Exception:
301 except Exception:
302 # corruption of the cache, just recompute it.
302 # corruption of the cache, just recompute it.
303 pass
303 pass
304
304
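The validline hunk above is this patch's actual change to tags.py. The reason it helps: iterators expose a .next() method on Python 2 but .__next__() on Python 3, and the next() builtin (available since Python 2.6) papers over the difference:

    lines = iter(['42 ' + '0' * 40])
    assert next(lines) == '42 ' + '0' * 40  # works on Python 2 and 3 alike
    # lines.next() would raise AttributeError on Python 3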
305 tipnode = repo.changelog.tip()
305 tipnode = repo.changelog.tip()
306 tiprev = len(repo.changelog) - 1
306 tiprev = len(repo.changelog) - 1
307
307
308 # Case 1 (common): tip is the same, so nothing has changed.
308 # Case 1 (common): tip is the same, so nothing has changed.
309 # (Unchanged tip trivially means no changesets have been added.
309 # (Unchanged tip trivially means no changesets have been added.
310 # But, thanks to localrepository.destroyed(), it also means none
310 # But, thanks to localrepository.destroyed(), it also means none
311 # have been destroyed by strip or rollback.)
311 # have been destroyed by strip or rollback.)
312 if (cacherev == tiprev
312 if (cacherev == tiprev
313 and cachenode == tipnode
313 and cachenode == tipnode
314 and cachehash == scmutil.filteredhash(repo, tiprev)):
314 and cachehash == scmutil.filteredhash(repo, tiprev)):
315 tags = _readtags(ui, repo, cachelines, cachefile.name)
315 tags = _readtags(ui, repo, cachelines, cachefile.name)
316 cachefile.close()
316 cachefile.close()
317 return (None, None, None, tags, False)
317 return (None, None, None, tags, False)
318 if cachefile:
318 if cachefile:
319 cachefile.close() # ignore rest of file
319 cachefile.close() # ignore rest of file
320
320
321 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
321 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
322
322
323 repoheads = repo.heads()
323 repoheads = repo.heads()
324 # Case 2 (uncommon): empty repo; get out quickly and don't bother
324 # Case 2 (uncommon): empty repo; get out quickly and don't bother
325 # writing an empty cache.
325 # writing an empty cache.
326 if repoheads == [nullid]:
326 if repoheads == [nullid]:
327 return ([], {}, valid, {}, False)
327 return ([], {}, valid, {}, False)
328
328
329 # Case 3 (uncommon): cache file missing or empty.
329 # Case 3 (uncommon): cache file missing or empty.
330
330
331 # Case 4 (uncommon): tip rev decreased. This should only happen
331 # Case 4 (uncommon): tip rev decreased. This should only happen
332 # when we're called from localrepository.destroyed(). Refresh the
332 # when we're called from localrepository.destroyed(). Refresh the
333 # cache so future invocations will not see disappeared heads in the
333 # cache so future invocations will not see disappeared heads in the
334 # cache.
334 # cache.
335
335
336 # Case 5 (common): tip has changed, so we've added/replaced heads.
336 # Case 5 (common): tip has changed, so we've added/replaced heads.
337
337
338 # As it happens, the code to handle cases 3, 4, 5 is the same.
338 # As it happens, the code to handle cases 3, 4, 5 is the same.
339
339
340 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
340 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
341 # exposed".
341 # exposed".
342 if not len(repo.file('.hgtags')):
342 if not len(repo.file('.hgtags')):
343 # No tags have ever been committed, so we can avoid a
343 # No tags have ever been committed, so we can avoid a
344 # potentially expensive search.
344 # potentially expensive search.
345 return ([], {}, valid, None, True)
345 return ([], {}, valid, None, True)
346
346
347 starttime = time.time()
347 starttime = time.time()
348
348
349 # Now we have to look up the .hgtags filenode for every new head.
349 # Now we have to look up the .hgtags filenode for every new head.
350 # This is the most expensive part of finding tags, so performance
350 # This is the most expensive part of finding tags, so performance
351 # depends primarily on the size of newheads. Worst case: no cache
351 # depends primarily on the size of newheads. Worst case: no cache
352 # file, so newheads == repoheads.
352 # file, so newheads == repoheads.
353 fnodescache = hgtagsfnodescache(repo.unfiltered())
353 fnodescache = hgtagsfnodescache(repo.unfiltered())
354 cachefnode = {}
354 cachefnode = {}
355 for head in reversed(repoheads):
355 for head in reversed(repoheads):
356 fnode = fnodescache.getfnode(head)
356 fnode = fnodescache.getfnode(head)
357 if fnode != nullid:
357 if fnode != nullid:
358 cachefnode[head] = fnode
358 cachefnode[head] = fnode
359
359
360 fnodescache.write()
360 fnodescache.write()
361
361
362 duration = time.time() - starttime
362 duration = time.time() - starttime
363 ui.log('tagscache',
363 ui.log('tagscache',
364 '%d/%d cache hits/lookups in %0.4f '
364 '%d/%d cache hits/lookups in %0.4f '
365 'seconds\n',
365 'seconds\n',
366 fnodescache.hitcount, fnodescache.lookupcount, duration)
366 fnodescache.hitcount, fnodescache.lookupcount, duration)
367
367
368 # Caller has to iterate over all heads, but can use the filenodes in
368 # Caller has to iterate over all heads, but can use the filenodes in
369 # cachefnode to get to each .hgtags revision quickly.
369 # cachefnode to get to each .hgtags revision quickly.
370 return (repoheads, cachefnode, valid, None, True)
370 return (repoheads, cachefnode, valid, None, True)
371
371
372 def _writetagcache(ui, repo, valid, cachetags):
372 def _writetagcache(ui, repo, valid, cachetags):
373 filename = _filename(repo)
373 filename = _filename(repo)
374 try:
374 try:
375 cachefile = repo.vfs(filename, 'w', atomictemp=True)
375 cachefile = repo.vfs(filename, 'w', atomictemp=True)
376 except (OSError, IOError):
376 except (OSError, IOError):
377 return
377 return
378
378
379 ui.log('tagscache', 'writing .hg/%s with %d tags\n',
379 ui.log('tagscache', 'writing .hg/%s with %d tags\n',
380 filename, len(cachetags))
380 filename, len(cachetags))
381
381
382 if valid[2]:
382 if valid[2]:
383 cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
383 cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
384 else:
384 else:
385 cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
385 cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
386
386
387 # Tag names in the cache are in UTF-8 -- which is the whole reason
387 # Tag names in the cache are in UTF-8 -- which is the whole reason
388 # we keep them in UTF-8 throughout this module. If we converted
388 # we keep them in UTF-8 throughout this module. If we converted
389 # them to local encoding on input, we would lose info writing them to
389 # them to local encoding on input, we would lose info writing them to
390 # the cache.
390 # the cache.
391 for (name, (node, hist)) in sorted(cachetags.iteritems()):
391 for (name, (node, hist)) in sorted(cachetags.iteritems()):
392 for n in hist:
392 for n in hist:
393 cachefile.write("%s %s\n" % (hex(n), name))
393 cachefile.write("%s %s\n" % (hex(n), name))
394 cachefile.write("%s %s\n" % (hex(node), name))
394 cachefile.write("%s %s\n" % (hex(node), name))
395
395
396 try:
396 try:
397 cachefile.close()
397 cachefile.close()
398 except (OSError, IOError):
398 except (OSError, IOError):
399 pass
399 pass

_fnodescachefile = 'cache/hgtagsfnodes1'
_fnodesrecsize = 4 + 20 # changeset fragment + filenode
_fnodesmissingrec = '\xff' * 24
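
# An illustrative note (not in the original source) on the constants
# above: each changelog revision owns a fixed 24-byte record at offset
# rev * _fnodesrecsize, a 4-byte changeset-node prefix followed by the
# 20-byte .hgtags filenode; an all-\xff record (_fnodesmissingrec) means
# the entry has not been computed yet. For example:
#
#     offset = rev * _fnodesrecsize
#     prefix, fnode = raw[offset:offset + 4], raw[offset + 4:offset + 24]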

class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20-byte .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds the full cache content in memory, but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """
    def __init__(self, repo):
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        self._raw = array('c')

        try:
            data = repo.vfs.read(_fnodescachefile)
        except (OSError, IOError):
            data = ""
        self._raw.fromstring(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            self._dirtyoffset = rawlen
            self._raw.extend('\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)
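
    # An illustrative example (not in the original source): with a
    # changelog of 3 revisions, wantedlen is 3 * 24 = 72 bytes. A
    # 48-byte cache file is padded with 24 bytes of \xff (one missing
    # record) and _dirtyoffset becomes 48; an oversized file is popped
    # back to 72 bytes, with _dirtyoffset marking the new end.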

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False, in which case None will be returned without
        any potentially expensive computation being performed.

        If no .hgtags file exists at the specified revision, nullid is
        returned.
        """
        ctx = self._repo[node]
        rev = ctx.rev()

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = self._raw[offset:offset + _fnodesrecsize].tostring()
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        # Populate missing entry.
        try:
            fnode = ctx.filenode('.hgtags')
        except error.LookupError:
            # No .hgtags file on this revision.
            fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode
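
    # An illustrative sketch (not part of tags.py) of the lookup flow
    # above, for a hypothetical repo object and binary changeset node:
    #
    #     cache = hgtagsfnodescache(repo.unfiltered())
    #     fnode = cache.getfnode(node)              # computed on a miss
    #     hit = cache.getfnode(node, computemissing=False)
    #     # hit is now the same 20-byte filenode, served from memory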

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        # Slices on array instances only accept other array instances.
        entry = array('c', prefix + fnode)
        self._raw[offset:offset + _fnodesrecsize] = entry
        # self._dirtyoffset could be None.
        self._dirtyoffset = min(self._dirtyoffset, offset) or 0
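        # Note (not in the original source): in Python 2, min(None, offset)
        # is None because None sorts before any integer, so the trailing
        # "or 0" makes a first-time write mark the cache dirty from the
        # start of the file; a genuine smaller offset passes through
        # min() unchanged (and 0 maps to 0, which is harmless).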

    def write(self):
        """Perform all necessary writes to the cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset:]
        if not data:
            return

        repo = self._repo

        try:
            lock = repo.wlock(wait=False)
        except error.LockError:
            repo.ui.log('tagscache',
                        'not writing .hg/%s because lock cannot be acquired\n' %
                        (_fnodescachefile))
            return

        try:
            f = repo.vfs.open(_fnodescachefile, 'ab')
            try:
                # If the file has been truncated on disk since we read it,
                # start writing at its actual end instead.
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset:]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log('tagscache',
                            'writing %d bytes to %s\n' % (
                            len(data), _fnodescachefile))
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            repo.ui.log('tagscache',
                        "couldn't write %s: %s\n" % (
                        _fnodescachefile, inst))
        finally:
            lock.release()
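
Because the file is opened in append mode, truncating at the dirty offset
means the subsequent write lands exactly there, so only the dirty tail of
the cache file is rewritten. As a stand-alone illustration (a hypothetical
helper, not Mercurial API), the resulting hgtagsfnodes1 file can be
decoded like this:

import binascii

_RECSIZE = 4 + 20              # changeset-node fragment + filenode
_MISSING = b'\xff' * _RECSIZE  # a record that was never computed

def dump_fnodes_cache(path):
    """Print (rev, node prefix, fnode) for every populated record."""
    with open(path, 'rb') as f:
        data = f.read()
    for rev in range(len(data) // _RECSIZE):
        record = data[rev * _RECSIZE:(rev + 1) * _RECSIZE]
        if record == _MISSING:
            continue  # revision has no cached filenode yet
        print('%d %s %s' % (rev, binascii.hexlify(record[:4]),
                            binascii.hexlify(record[4:])))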