diffutil: remove diffopts() in favor of diffallopts()...
Yuya Nishihara
r38606:b62000a2 default
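The migration is a one-line substitution at each call site: diffutil.diffopts() is removed and callers invoke diffutil.diffallopts() directly. A minimal sketch of the new call, assuming a Mercurial ui object is in scope as it is at both call sites below:

    from mercurial.utils import diffutil

    # Before this change (helper now removed):
    #   diffopts = diffutil.diffopts(ui, {'git': True})
    # After: call diffallopts() directly with the same arguments.
    diffopts = diffutil.diffallopts(ui, {'git': True})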
@@ -1,521 +1,521 @@
# synthrepo.py - repo synthesis
#
# Copyright 2012 Facebook
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''synthesize structurally interesting change history

This extension is useful for creating a repository with properties
that are statistically similar to an existing repository. During
analysis, a simple probability table is constructed from the history
of an existing repository. During synthesis, these properties are
reconstructed.

Properties that are analyzed and synthesized include the following:

- Lines added or removed when an existing file is modified
- Number and sizes of files added
- Number of files removed
- Line lengths
- Topological distance to parent changeset(s)
- Probability of a commit being a merge
- Probability of a newly added file being added to a new directory
- Interarrival time, and time zone, of commits
- Number of files in each directory

A few obvious properties that are not currently handled realistically:

- Merges are treated as regular commits with two parents, which is not
  realistic
- Modifications are not treated as operations on hunks of lines, but
  as insertions and deletions of randomly chosen single lines
- Committer ID (always random)
- Executability of files
- Symlinks and binary files are ignored
'''

from __future__ import absolute_import
import bisect
import collections
import itertools
import json
import os
import random
import sys
import time

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial import (
    context,
    error,
    hg,
    patch,
    registrar,
    scmutil,
)
from mercurial.utils import (
    dateutil,
    diffutil,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)

newfile = {'new fi', 'rename', 'copy f', 'copy t'}

def zerodict():
    return collections.defaultdict(lambda: 0)

def roundto(x, k):
    if x > k * 2:
        return int(round(x / float(k)) * k)
    return int(round(x))

def parsegitdiff(lines):
    filename, mar, lineadd, lineremove = None, None, zerodict(), 0
    binary = False
    for line in lines:
        start = line[:6]
        if start == 'diff -':
            if filename:
                yield filename, mar, lineadd, lineremove, binary
            mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
            filename = patch.gitre.match(line).group(1)
        elif start in newfile:
            mar = 'a'
        elif start == 'GIT bi':
            binary = True
        elif start == 'delete':
            mar = 'r'
        elif start:
            s = start[0]
            if s == '-' and not line.startswith('--- '):
                lineremove += 1
            elif s == '+' and not line.startswith('+++ '):
                lineadd[roundto(len(line) - 1, 5)] += 1
    if filename:
        yield filename, mar, lineadd, lineremove, binary

@command('analyze',
         [('o', 'output', '', _('write output to given file'), _('FILE')),
          ('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
         _('hg analyze'), optionalrepo=True)
def analyze(ui, repo, *revs, **opts):
    '''create a simple model of a repository to use for later synthesis

    This command examines every changeset in the given range (or all
    of history if none are specified) and creates a simple statistical
    model of the history of the repository. It also measures the directory
    structure of the repository as checked out.

    The model is written out to a JSON file, and can be used by
    :hg:`synthesize` to create or augment a repository with synthetic
    commits that have a structure that is statistically similar to the
    analyzed repository.
    '''
    root = repo.root
    if not root.endswith(os.path.sep):
        root += os.path.sep

    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        revs = [':']

    output = opts['output']
    if not output:
        output = os.path.basename(root) + '.json'

    if output == '-':
        fp = sys.stdout
    else:
        fp = open(output, 'w')

    # Always obtain file counts of each directory in the given root directory.
    def onerror(e):
        ui.warn(_('error walking directory structure: %s\n') % e)

    dirs = {}
    rootprefixlen = len(root)
    for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
        dirpathfromroot = dirpath[rootprefixlen:]
        dirs[dirpathfromroot] = len(filenames)
        if '.hg' in dirnames:
            dirnames.remove('.hg')

    lineschanged = zerodict()
    children = zerodict()
    p1distance = zerodict()
    p2distance = zerodict()
    linesinfilesadded = zerodict()
    fileschanged = zerodict()
    filesadded = zerodict()
    filesremoved = zerodict()
    linelengths = zerodict()
    interarrival = zerodict()
    parents = zerodict()
    dirsadded = zerodict()
    tzoffset = zerodict()

    # If a mercurial repo is available, also model the commit history.
    if repo:
        revs = scmutil.revrange(repo, revs)
        revs.sort()

        progress = ui.makeprogress(_('analyzing'), unit=_('changesets'),
                                   total=len(revs))
        for i, rev in enumerate(revs):
            progress.update(i)
            ctx = repo[rev]
            pl = ctx.parents()
            pctx = pl[0]
            prev = pctx.rev()
            children[prev] += 1
            p1distance[rev - prev] += 1
            parents[len(pl)] += 1
            tzoffset[ctx.date()[1]] += 1
            if len(pl) > 1:
                p2distance[rev - pl[1].rev()] += 1
            if prev == rev - 1:
                lastctx = pctx
            else:
                lastctx = repo[rev - 1]
            if lastctx.rev() != nullrev:
                timedelta = ctx.date()[0] - lastctx.date()[0]
                interarrival[roundto(timedelta, 300)] += 1
-            diffopts = diffutil.diffopts(ui, {'git': True})
+            diffopts = diffutil.diffallopts(ui, {'git': True})
            diff = sum((d.splitlines()
                        for d in ctx.diff(pctx, opts=diffopts)), [])
            fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
            for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
                if isbin:
                    continue
                added = sum(lineadd.itervalues(), 0)
                if mar == 'm':
                    if added and lineremove:
                        lineschanged[roundto(added, 5),
                                     roundto(lineremove, 5)] += 1
                    filechanges += 1
                elif mar == 'a':
                    fileadds += 1
                    if '/' in filename:
                        filedir = filename.rsplit('/', 1)[0]
                        if filedir not in pctx.dirs():
                            diradds += 1
                    linesinfilesadded[roundto(added, 5)] += 1
                elif mar == 'r':
                    fileremoves += 1
                for length, count in lineadd.iteritems():
                    linelengths[length] += count
            fileschanged[filechanges] += 1
            filesadded[fileadds] += 1
            dirsadded[diradds] += 1
            filesremoved[fileremoves] += 1
        progress.complete()

    invchildren = zerodict()

    for rev, count in children.iteritems():
        invchildren[count] += 1

    if output != '-':
        ui.status(_('writing output to %s\n') % output)

    def pronk(d):
        return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)

    json.dump({'revs': len(revs),
               'initdirs': pronk(dirs),
               'lineschanged': pronk(lineschanged),
               'children': pronk(invchildren),
               'fileschanged': pronk(fileschanged),
               'filesadded': pronk(filesadded),
               'linesinfilesadded': pronk(linesinfilesadded),
               'dirsadded': pronk(dirsadded),
               'filesremoved': pronk(filesremoved),
               'linelengths': pronk(linelengths),
               'parents': pronk(parents),
               'p1distance': pronk(p1distance),
               'p2distance': pronk(p2distance),
               'interarrival': pronk(interarrival),
               'tzoffset': pronk(tzoffset),
               },
              fp)
    fp.close()

@command('synthesize',
         [('c', 'count', 0, _('create given number of commits'), _('COUNT')),
          ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
          ('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
         _('hg synthesize [OPTION].. DESCFILE'))
def synthesize(ui, repo, descpath, **opts):
    '''synthesize commits based on a model of an existing repository

    The model must have been generated by :hg:`analyze`. Commits will
    be generated randomly according to the probabilities described in
    the model. If --initfiles is set, the repository will be seeded with
    the given number of files following the modeled repository's directory
    structure.

    When synthesizing new content, commit descriptions, and user
    names, words will be chosen randomly from a dictionary that is
    presumed to contain one word per line. Use --dict to specify the
    path to an alternate dictionary to use.
    '''
    try:
        fp = hg.openpath(ui, descpath)
    except Exception as err:
        raise error.Abort('%s: %s' % (descpath, err[0].strerror))
    desc = json.load(fp)
    fp.close()

    def cdf(l):
        if not l:
            return [], []
        vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
        t = float(sum(probs, 0))
        s, cdfs = 0, []
        for v in probs:
            s += v
            cdfs.append(s / t)
        return vals, cdfs

    lineschanged = cdf(desc['lineschanged'])
    fileschanged = cdf(desc['fileschanged'])
    filesadded = cdf(desc['filesadded'])
    dirsadded = cdf(desc['dirsadded'])
    filesremoved = cdf(desc['filesremoved'])
    linelengths = cdf(desc['linelengths'])
    parents = cdf(desc['parents'])
    p1distance = cdf(desc['p1distance'])
    p2distance = cdf(desc['p2distance'])
    interarrival = cdf(desc['interarrival'])
    linesinfilesadded = cdf(desc['linesinfilesadded'])
    tzoffset = cdf(desc['tzoffset'])

    dictfile = opts.get('dict') or '/usr/share/dict/words'
    try:
        fp = open(dictfile, 'rU')
    except IOError as err:
        raise error.Abort('%s: %s' % (dictfile, err.strerror))
    words = fp.read().splitlines()
    fp.close()

    initdirs = {}
    if desc['initdirs']:
        for k, v in desc['initdirs']:
            initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
        initdirs = renamedirs(initdirs, words)
    initdirscdf = cdf(initdirs)

    def pick(cdf):
        return cdf[0][bisect.bisect_left(cdf[1], random.random())]

    def pickpath():
        return os.path.join(pick(initdirscdf), random.choice(words))

    def makeline(minimum=0):
        total = max(minimum, pick(linelengths))
        c, l = 0, []
        while c < total:
            w = random.choice(words)
            c += len(w) + 1
            l.append(w)
        return ' '.join(l)

    wlock = repo.wlock()
    lock = repo.lock()

    nevertouch = {'.hgsub', '.hgignore', '.hgtags'}

    _synthesizing = _('synthesizing')
    _files = _('initial files')
    _changesets = _('changesets')

    # Synthesize a single initial revision adding files to the repo according
    # to the modeled directory structure.
    initcount = int(opts['initfiles'])
    if initcount and initdirs:
        pctx = repo[None].parents()[0]
        dirs = set(pctx.dirs())
        files = {}

        def validpath(path):
            # Don't pick filenames which are already directory names.
            if path in dirs:
                return False
            # Don't pick directories which were used as file names.
            while path:
                if path in files:
                    return False
                path = os.path.dirname(path)
            return True

        progress = ui.makeprogress(_synthesizing, unit=_files, total=initcount)
        for i in xrange(0, initcount):
            progress.update(i)

            path = pickpath()
            while not validpath(path):
                path = pickpath()
            data = '%s contents\n' % path
            files[path] = data
            dir = os.path.dirname(path)
            while dir and dir not in dirs:
                dirs.add(dir)
                dir = os.path.dirname(dir)

        def filectxfn(repo, memctx, path):
            return context.memfilectx(repo, memctx, path, files[path])

        progress.complete()
        message = 'synthesized wide repo with %d files' % (len(files),)
        mc = context.memctx(repo, [pctx.node(), nullid], message,
                            files, filectxfn, ui.username(),
                            '%d %d' % dateutil.makedate())
        initnode = mc.commit()
        if ui.debugflag:
            hexfn = hex
        else:
            hexfn = short
        ui.status(_('added commit %s with %d files\n')
                  % (hexfn(initnode), len(files)))

    # Synthesize incremental revisions to the repository, adding repo depth.
    count = int(opts['count'])
    heads = set(map(repo.changelog.rev, repo.heads()))
    progress = ui.makeprogress(_synthesizing, unit=_changesets, total=count)
    for i in xrange(count):
        progress.update(i)

        node = repo.changelog.node
        revs = len(repo)

        def pickhead(heads, distance):
            if heads:
                lheads = sorted(heads)
                rev = revs - min(pick(distance), revs)
                if rev < lheads[-1]:
                    rev = lheads[bisect.bisect_left(lheads, rev)]
                else:
                    rev = lheads[-1]
                return rev, node(rev)
            return nullrev, nullid

        r1 = revs - min(pick(p1distance), revs)
        p1 = node(r1)

        # the number of heads will grow without bound if we use a pure
        # model, so artificially constrain their proliferation
        toomanyheads = len(heads) > random.randint(1, 20)
        if p2distance[0] and (pick(parents) == 2 or toomanyheads):
            r2, p2 = pickhead(heads.difference([r1]), p2distance)
        else:
            r2, p2 = nullrev, nullid

        pl = [p1, p2]
        pctx = repo[r1]
        mf = pctx.manifest()
        mfk = mf.keys()
        changes = {}
        if mfk:
            for __ in xrange(pick(fileschanged)):
                for __ in xrange(10):
                    fctx = pctx.filectx(random.choice(mfk))
                    path = fctx.path()
                    if not (path in nevertouch or fctx.isbinary() or
                            'l' in fctx.flags()):
                        break
                lines = fctx.data().splitlines()
                add, remove = pick(lineschanged)
                for __ in xrange(remove):
                    if not lines:
                        break
                    del lines[random.randrange(0, len(lines))]
                for __ in xrange(add):
                    lines.insert(random.randint(0, len(lines)), makeline())
                path = fctx.path()
                changes[path] = '\n'.join(lines) + '\n'
            for __ in xrange(pick(filesremoved)):
                path = random.choice(mfk)
                for __ in xrange(10):
                    path = random.choice(mfk)
                    if path not in changes:
                        break
        if filesadded:
            dirs = list(pctx.dirs())
            dirs.insert(0, '')
        for __ in xrange(pick(filesadded)):
            pathstr = ''
            while pathstr in dirs:
                path = [random.choice(dirs)]
                if pick(dirsadded):
                    path.append(random.choice(words))
                path.append(random.choice(words))
                pathstr = '/'.join(filter(None, path))
            data = '\n'.join(makeline()
                             for __ in xrange(pick(linesinfilesadded))) + '\n'
            changes[pathstr] = data
        def filectxfn(repo, memctx, path):
            if path not in changes:
                return None
            return context.memfilectx(repo, memctx, path, changes[path])
        if not changes:
            continue
        if revs:
            date = repo['tip'].date()[0] + pick(interarrival)
        else:
            date = time.time() - (86400 * count)
        # dates in mercurial must be positive and fit in 32-bit signed integers.
        date = min(0x7fffffff, max(0, date))
        user = random.choice(words) + '@' + random.choice(words)
        mc = context.memctx(repo, pl, makeline(minimum=2),
                            sorted(changes),
                            filectxfn, user, '%d %d' % (date, pick(tzoffset)))
        newnode = mc.commit()
        heads.add(repo.changelog.rev(newnode))
        heads.discard(r1)
        heads.discard(r2)
    progress.complete()

    lock.release()
    wlock.release()

def renamedirs(dirs, words):
    '''Randomly rename the directory names in the per-dir file count dict.'''
    wordgen = itertools.cycle(words)
    replacements = {'': ''}
    def rename(dirpath):
        '''Recursively rename the directory and all path prefixes.

        The mapping from path to renamed path is stored for all path prefixes
        as in dynamic programming, ensuring linear runtime and consistent
        renaming regardless of iteration order through the model.
        '''
        if dirpath in replacements:
            return replacements[dirpath]
        head, _ = os.path.split(dirpath)
        if head:
            head = rename(head)
        else:
            head = ''
        renamed = os.path.join(head, next(wordgen))
        replacements[dirpath] = renamed
        return renamed
    result = []
    for dirpath, count in dirs.iteritems():
        result.append([rename(dirpath.lstrip(os.sep)), count])
    return result
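The sampling machinery above works by inverse-CDF lookup: cdf() sorts (value, count) pairs by descending count and accumulates normalized cumulative probabilities, while pick() binary-searches that cumulative list with a uniform random draw. A standalone sketch of the same technique (hypothetical model data; names mirror the extension's helpers):

    import bisect
    import random

    def cdf(pairs):
        # Sort (value, count) pairs by descending count, then build the
        # normalized cumulative distribution alongside the values.
        if not pairs:
            return [], []
        vals, counts = zip(*sorted(pairs, key=lambda x: x[1], reverse=True))
        total = float(sum(counts))
        acc, cdfs = 0, []
        for c in counts:
            acc += c
            cdfs.append(acc / total)
        return vals, cdfs

    def pick(c):
        # Binary-search the cumulative probabilities with a uniform draw.
        return c[0][bisect.bisect_left(c[1], random.random())]

    linelengths = cdf([(10, 5), (40, 3), (80, 1)])  # hypothetical counts
    sample = pick(linelengths)  # 10 with p=5/9, 40 with p=3/9, 80 with p=1/9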
@@ -1,980 +1,980 @@
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 node as nodemod,
14 node as nodemod,
15 phases,
15 phases,
16 util,
16 util,
17 )
17 )
18 from .utils import (
18 from .utils import (
19 dateutil,
19 dateutil,
20 diffutil,
20 diffutil,
21 )
21 )
22
22
23 ### obsolescence marker flag
23 ### obsolescence marker flag
24
24
25 ## bumpedfix flag
25 ## bumpedfix flag
26 #
26 #
27 # When a changeset A' succeed to a changeset A which became public, we call A'
27 # When a changeset A' succeed to a changeset A which became public, we call A'
28 # "bumped" because it's a successors of a public changesets
28 # "bumped" because it's a successors of a public changesets
29 #
29 #
30 # o A' (bumped)
30 # o A' (bumped)
31 # |`:
31 # |`:
32 # | o A
32 # | o A
33 # |/
33 # |/
34 # o Z
34 # o Z
35 #
35 #
36 # The way to solve this situation is to create a new changeset Ad as children
36 # The way to solve this situation is to create a new changeset Ad as children
37 # of A. This changeset have the same content than A'. So the diff from A to A'
37 # of A. This changeset have the same content than A'. So the diff from A to A'
38 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
38 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
39 #
39 #
40 # o Ad
40 # o Ad
41 # |`:
41 # |`:
42 # | x A'
42 # | x A'
43 # |'|
43 # |'|
44 # o | A
44 # o | A
45 # |/
45 # |/
46 # o Z
46 # o Z
47 #
47 #
48 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
48 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
49 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
49 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 # This flag mean that the successors express the changes between the public and
50 # This flag mean that the successors express the changes between the public and
51 # bumped version and fix the situation, breaking the transitivity of
51 # bumped version and fix the situation, breaking the transitivity of
52 # "bumped" here.
52 # "bumped" here.
53 bumpedfix = 1
53 bumpedfix = 1
54 usingsha256 = 2
54 usingsha256 = 2
55
55
56 class marker(object):
56 class marker(object):
57 """Wrap obsolete marker raw data"""
57 """Wrap obsolete marker raw data"""
58
58
59 def __init__(self, repo, data):
59 def __init__(self, repo, data):
60 # the repo argument will be used to create changectx in later version
60 # the repo argument will be used to create changectx in later version
61 self._repo = repo
61 self._repo = repo
62 self._data = data
62 self._data = data
63 self._decodedmeta = None
63 self._decodedmeta = None
64
64
65 def __hash__(self):
65 def __hash__(self):
66 return hash(self._data)
66 return hash(self._data)
67
67
68 def __eq__(self, other):
68 def __eq__(self, other):
69 if type(other) != type(self):
69 if type(other) != type(self):
70 return False
70 return False
71 return self._data == other._data
71 return self._data == other._data
72
72
73 def prednode(self):
73 def prednode(self):
74 """Predecessor changeset node identifier"""
74 """Predecessor changeset node identifier"""
75 return self._data[0]
75 return self._data[0]
76
76
77 def succnodes(self):
77 def succnodes(self):
78 """List of successor changesets node identifiers"""
78 """List of successor changesets node identifiers"""
79 return self._data[1]
79 return self._data[1]
80
80
81 def parentnodes(self):
81 def parentnodes(self):
82 """Parents of the predecessors (None if not recorded)"""
82 """Parents of the predecessors (None if not recorded)"""
83 return self._data[5]
83 return self._data[5]
84
84
85 def metadata(self):
85 def metadata(self):
86 """Decoded metadata dictionary"""
86 """Decoded metadata dictionary"""
87 return dict(self._data[3])
87 return dict(self._data[3])
88
88
89 def date(self):
89 def date(self):
90 """Creation date as (unixtime, offset)"""
90 """Creation date as (unixtime, offset)"""
91 return self._data[4]
91 return self._data[4]
92
92
93 def flags(self):
93 def flags(self):
94 """The flags field of the marker"""
94 """The flags field of the marker"""
95 return self._data[2]
95 return self._data[2]
96
96
97 def getmarkers(repo, nodes=None, exclusive=False):
97 def getmarkers(repo, nodes=None, exclusive=False):
98 """returns markers known in a repository
98 """returns markers known in a repository
99
99
100 If <nodes> is specified, only markers "relevant" to those nodes are are
100 If <nodes> is specified, only markers "relevant" to those nodes are are
101 returned"""
101 returned"""
102 if nodes is None:
102 if nodes is None:
103 rawmarkers = repo.obsstore
103 rawmarkers = repo.obsstore
104 elif exclusive:
104 elif exclusive:
105 rawmarkers = exclusivemarkers(repo, nodes)
105 rawmarkers = exclusivemarkers(repo, nodes)
106 else:
106 else:
107 rawmarkers = repo.obsstore.relevantmarkers(nodes)
107 rawmarkers = repo.obsstore.relevantmarkers(nodes)
108
108
109 for markerdata in rawmarkers:
109 for markerdata in rawmarkers:
110 yield marker(repo, markerdata)
110 yield marker(repo, markerdata)
111
111
112 def closestpredecessors(repo, nodeid):
112 def closestpredecessors(repo, nodeid):
113 """yield the list of next predecessors pointing on visible changectx nodes
113 """yield the list of next predecessors pointing on visible changectx nodes
114
114
115 This function respect the repoview filtering, filtered revision will be
115 This function respect the repoview filtering, filtered revision will be
116 considered missing.
116 considered missing.
117 """
117 """
118
118
119 precursors = repo.obsstore.predecessors
119 precursors = repo.obsstore.predecessors
120 stack = [nodeid]
120 stack = [nodeid]
121 seen = set(stack)
121 seen = set(stack)
122
122
123 while stack:
123 while stack:
124 current = stack.pop()
124 current = stack.pop()
125 currentpreccs = precursors.get(current, ())
125 currentpreccs = precursors.get(current, ())
126
126
127 for prec in currentpreccs:
127 for prec in currentpreccs:
128 precnodeid = prec[0]
128 precnodeid = prec[0]
129
129
130 # Basic cycle protection
130 # Basic cycle protection
131 if precnodeid in seen:
131 if precnodeid in seen:
132 continue
132 continue
133 seen.add(precnodeid)
133 seen.add(precnodeid)
134
134
135 if precnodeid in repo:
135 if precnodeid in repo:
136 yield precnodeid
136 yield precnodeid
137 else:
137 else:
138 stack.append(precnodeid)
138 stack.append(precnodeid)
139
139
140 def allpredecessors(obsstore, nodes, ignoreflags=0):
140 def allpredecessors(obsstore, nodes, ignoreflags=0):
141 """Yield node for every precursors of <nodes>.
141 """Yield node for every precursors of <nodes>.
142
142
143 Some precursors may be unknown locally.
143 Some precursors may be unknown locally.
144
144
145 This is a linear yield unsuited to detecting folded changesets. It includes
145 This is a linear yield unsuited to detecting folded changesets. It includes
146 initial nodes too."""
146 initial nodes too."""
147
147
148 remaining = set(nodes)
148 remaining = set(nodes)
149 seen = set(remaining)
149 seen = set(remaining)
150 while remaining:
150 while remaining:
151 current = remaining.pop()
151 current = remaining.pop()
152 yield current
152 yield current
153 for mark in obsstore.predecessors.get(current, ()):
153 for mark in obsstore.predecessors.get(current, ()):
154 # ignore marker flagged with specified flag
154 # ignore marker flagged with specified flag
155 if mark[2] & ignoreflags:
155 if mark[2] & ignoreflags:
156 continue
156 continue
157 suc = mark[0]
157 suc = mark[0]
158 if suc not in seen:
158 if suc not in seen:
159 seen.add(suc)
159 seen.add(suc)
160 remaining.add(suc)
160 remaining.add(suc)
161
161
162 def allsuccessors(obsstore, nodes, ignoreflags=0):
162 def allsuccessors(obsstore, nodes, ignoreflags=0):
163 """Yield node for every successor of <nodes>.
163 """Yield node for every successor of <nodes>.
164
164
165 Some successors may be unknown locally.
165 Some successors may be unknown locally.
166
166
167 This is a linear yield unsuited to detecting split changesets. It includes
167 This is a linear yield unsuited to detecting split changesets. It includes
168 initial nodes too."""
168 initial nodes too."""
169 remaining = set(nodes)
169 remaining = set(nodes)
170 seen = set(remaining)
170 seen = set(remaining)
171 while remaining:
171 while remaining:
172 current = remaining.pop()
172 current = remaining.pop()
173 yield current
173 yield current
174 for mark in obsstore.successors.get(current, ()):
174 for mark in obsstore.successors.get(current, ()):
175 # ignore marker flagged with specified flag
175 # ignore marker flagged with specified flag
176 if mark[2] & ignoreflags:
176 if mark[2] & ignoreflags:
177 continue
177 continue
178 for suc in mark[1]:
178 for suc in mark[1]:
179 if suc not in seen:
179 if suc not in seen:
180 seen.add(suc)
180 seen.add(suc)
181 remaining.add(suc)
181 remaining.add(suc)
182
182
183 def _filterprunes(markers):
183 def _filterprunes(markers):
184 """return a set with no prune markers"""
184 """return a set with no prune markers"""
185 return set(m for m in markers if m[1])
185 return set(m for m in markers if m[1])
186
186
187 def exclusivemarkers(repo, nodes):
187 def exclusivemarkers(repo, nodes):
188 """set of markers relevant to "nodes" but no other locally-known nodes
188 """set of markers relevant to "nodes" but no other locally-known nodes
189
189
190 This function compute the set of markers "exclusive" to a locally-known
190 This function compute the set of markers "exclusive" to a locally-known
191 node. This means we walk the markers starting from <nodes> until we reach a
191 node. This means we walk the markers starting from <nodes> until we reach a
192 locally-known precursors outside of <nodes>. Element of <nodes> with
192 locally-known precursors outside of <nodes>. Element of <nodes> with
193 locally-known successors outside of <nodes> are ignored (since their
193 locally-known successors outside of <nodes> are ignored (since their
194 precursors markers are also relevant to these successors).
194 precursors markers are also relevant to these successors).
195
195
196 For example:
196 For example:
197
197
198 # (A0 rewritten as A1)
198 # (A0 rewritten as A1)
199 #
199 #
200 # A0 <-1- A1 # Marker "1" is exclusive to A1
200 # A0 <-1- A1 # Marker "1" is exclusive to A1
201
201
202 or
202 or
203
203
204 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
204 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
205 #
205 #
206 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
206 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
207
207
208 or
208 or
209
209
210 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
210 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
211 #
211 #
212 # <-2- A1 # Marker "2" is exclusive to A0,A1
212 # <-2- A1 # Marker "2" is exclusive to A0,A1
213 # /
213 # /
214 # <-1- A0
214 # <-1- A0
215 # \
215 # \
216 # <-3- A2 # Marker "3" is exclusive to A0,A2
216 # <-3- A2 # Marker "3" is exclusive to A0,A2
217 #
217 #
218 # in addition:
218 # in addition:
219 #
219 #
220 # Markers "2,3" are exclusive to A1,A2
220 # Markers "2,3" are exclusive to A1,A2
221 # Markers "1,2,3" are exclusive to A0,A1,A2
221 # Markers "1,2,3" are exclusive to A0,A1,A2
222
222
223 See test/test-obsolete-bundle-strip.t for more examples.
223 See test/test-obsolete-bundle-strip.t for more examples.
224
224
225 An example usage is strip. When stripping a changeset, we also want to
225 An example usage is strip. When stripping a changeset, we also want to
226 strip the markers exclusive to this changeset. Otherwise we would have
226 strip the markers exclusive to this changeset. Otherwise we would have
227 "dangling"" obsolescence markers from its precursors: Obsolescence markers
227 "dangling"" obsolescence markers from its precursors: Obsolescence markers
228 marking a node as obsolete without any successors available locally.
228 marking a node as obsolete without any successors available locally.
229
229
230 As for relevant markers, the prune markers for children will be followed.
230 As for relevant markers, the prune markers for children will be followed.
231 Of course, they will only be followed if the pruned children is
231 Of course, they will only be followed if the pruned children is
232 locally-known. Since the prune markers are relevant to the pruned node.
232 locally-known. Since the prune markers are relevant to the pruned node.
233 However, while prune markers are considered relevant to the parent of the
233 However, while prune markers are considered relevant to the parent of the
234 pruned changesets, prune markers for locally-known changeset (with no
234 pruned changesets, prune markers for locally-known changeset (with no
235 successors) are considered exclusive to the pruned nodes. This allows
235 successors) are considered exclusive to the pruned nodes. This allows
236 to strip the prune markers (with the rest of the exclusive chain) alongside
236 to strip the prune markers (with the rest of the exclusive chain) alongside
237 the pruned changesets.
237 the pruned changesets.
238 """
238 """
239 # running on a filtered repository would be dangerous as markers could be
239 # running on a filtered repository would be dangerous as markers could be
240 # reported as exclusive when they are relevant for other filtered nodes.
240 # reported as exclusive when they are relevant for other filtered nodes.
241 unfi = repo.unfiltered()
241 unfi = repo.unfiltered()
242
242
243 # shortcut to various useful item
243 # shortcut to various useful item
244 nm = unfi.changelog.nodemap
244 nm = unfi.changelog.nodemap
245 precursorsmarkers = unfi.obsstore.predecessors
245 precursorsmarkers = unfi.obsstore.predecessors
246 successormarkers = unfi.obsstore.successors
246 successormarkers = unfi.obsstore.successors
247 childrenmarkers = unfi.obsstore.children
247 childrenmarkers = unfi.obsstore.children
248
248
249 # exclusive markers (return of the function)
249 # exclusive markers (return of the function)
250 exclmarkers = set()
250 exclmarkers = set()
251 # we need fast membership testing
251 # we need fast membership testing
252 nodes = set(nodes)
252 nodes = set(nodes)
253 # looking for head in the obshistory
253 # looking for head in the obshistory
254 #
254 #
255 # XXX we are ignoring all issues in regard with cycle for now.
255 # XXX we are ignoring all issues in regard with cycle for now.
256 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
256 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
257 stack.sort()
257 stack.sort()
258 # nodes already stacked
258 # nodes already stacked
259 seennodes = set(stack)
259 seennodes = set(stack)
260 while stack:
260 while stack:
261 current = stack.pop()
261 current = stack.pop()
262 # fetch precursors markers
262 # fetch precursors markers
263 markers = list(precursorsmarkers.get(current, ()))
263 markers = list(precursorsmarkers.get(current, ()))
264 # extend the list with prune markers
264 # extend the list with prune markers
265 for mark in successormarkers.get(current, ()):
265 for mark in successormarkers.get(current, ()):
266 if not mark[1]:
266 if not mark[1]:
267 markers.append(mark)
267 markers.append(mark)
268 # and markers from children (looking for prune)
268 # and markers from children (looking for prune)
269 for mark in childrenmarkers.get(current, ()):
269 for mark in childrenmarkers.get(current, ()):
270 if not mark[1]:
270 if not mark[1]:
271 markers.append(mark)
271 markers.append(mark)
272 # traverse the markers
272 # traverse the markers
273 for mark in markers:
273 for mark in markers:
274 if mark in exclmarkers:
274 if mark in exclmarkers:
275 # markers already selected
275 # markers already selected
276 continue
276 continue
277
277
278 # If the markers is about the current node, select it
278 # If the markers is about the current node, select it
279 #
279 #
280 # (this delay the addition of markers from children)
280 # (this delay the addition of markers from children)
281 if mark[1] or mark[0] == current:
281 if mark[1] or mark[0] == current:
282 exclmarkers.add(mark)
282 exclmarkers.add(mark)
283
283
284 # should we keep traversing through the precursors?
284 # should we keep traversing through the precursors?
285 prec = mark[0]
285 prec = mark[0]
286
286
287 # nodes in the stack or already processed
287 # nodes in the stack or already processed
288 if prec in seennodes:
288 if prec in seennodes:
289 continue
289 continue
290
290
291 # is this a locally known node ?
291 # is this a locally known node ?
292 known = prec in nm
292 known = prec in nm
293 # if locally-known and not in the <nodes> set the traversal
293 # if locally-known and not in the <nodes> set the traversal
294 # stop here.
294 # stop here.
295 if known and prec not in nodes:
295 if known and prec not in nodes:
296 continue
296 continue
297
297
298 # do not keep going if there are unselected markers pointing to this
298 # do not keep going if there are unselected markers pointing to this
299 # nodes. If we end up traversing these unselected markers later the
299 # nodes. If we end up traversing these unselected markers later the
300 # node will be taken care of at that point.
300 # node will be taken care of at that point.
301 precmarkers = _filterprunes(successormarkers.get(prec))
301 precmarkers = _filterprunes(successormarkers.get(prec))
302 if precmarkers.issubset(exclmarkers):
302 if precmarkers.issubset(exclmarkers):
303 seennodes.add(prec)
303 seennodes.add(prec)
304 stack.append(prec)
304 stack.append(prec)
305
305
306 return exclmarkers
306 return exclmarkers
307
307
308 def foreground(repo, nodes):
308 def foreground(repo, nodes):
309 """return all nodes in the "foreground" of other node
309 """return all nodes in the "foreground" of other node
310
310
311 The foreground of a revision is anything reachable using parent -> children
311 The foreground of a revision is anything reachable using parent -> children
312 or precursor -> successor relation. It is very similar to "descendant" but
312 or precursor -> successor relation. It is very similar to "descendant" but
313 augmented with obsolescence information.
313 augmented with obsolescence information.
314
314
315 Beware that possible obsolescence cycle may result if complex situation.
315 Beware that possible obsolescence cycle may result if complex situation.
316 """
316 """
317 repo = repo.unfiltered()
317 repo = repo.unfiltered()
318 foreground = set(repo.set('%ln::', nodes))
318 foreground = set(repo.set('%ln::', nodes))
319 if repo.obsstore:
319 if repo.obsstore:
320 # We only need this complicated logic if there is obsolescence
320 # We only need this complicated logic if there is obsolescence
321 # XXX will probably deserve an optimised revset.
321 # XXX will probably deserve an optimised revset.
322 nm = repo.changelog.nodemap
322 nm = repo.changelog.nodemap
323 plen = -1
323 plen = -1
324 # compute the whole set of successors or descendants
324 # compute the whole set of successors or descendants
325 while len(foreground) != plen:
325 while len(foreground) != plen:
326 plen = len(foreground)
326 plen = len(foreground)
327 succs = set(c.node() for c in foreground)
327 succs = set(c.node() for c in foreground)
328 mutable = [c.node() for c in foreground if c.mutable()]
328 mutable = [c.node() for c in foreground if c.mutable()]
329 succs.update(allsuccessors(repo.obsstore, mutable))
329 succs.update(allsuccessors(repo.obsstore, mutable))
330 known = (n for n in succs if n in nm)
330 known = (n for n in succs if n in nm)
331 foreground = set(repo.set('%ln::', known))
331 foreground = set(repo.set('%ln::', known))
332 return set(c.node() for c in foreground)
332 return set(c.node() for c in foreground)
333
333
334 # effectflag field
334 # effectflag field
335 #
335 #
336 # Effect-flag is a 1-byte bit field used to store what changed between a
336 # Effect-flag is a 1-byte bit field used to store what changed between a
337 # changeset and its successor(s).
337 # changeset and its successor(s).
338 #
338 #
339 # The effect flag is stored in obs-markers metadata while we iterate on the
339 # The effect flag is stored in obs-markers metadata while we iterate on the
340 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
340 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
341 # with an incompatible design for effect flag, we can store a new design under
341 # with an incompatible design for effect flag, we can store a new design under
342 # another field name so we don't break readers. We plan to extend the existing
342 # another field name so we don't break readers. We plan to extend the existing
343 # obsmarkers bit-field when the effect flag design will be stabilized.
343 # obsmarkers bit-field when the effect flag design will be stabilized.
344 #
344 #
345 # The effect-flag is placed behind an experimental flag
345 # The effect-flag is placed behind an experimental flag
346 # `effect-flags` set to off by default.
346 # `effect-flags` set to off by default.
347 #
347 #
348
348
349 EFFECTFLAGFIELD = "ef1"
349 EFFECTFLAGFIELD = "ef1"
350
350
351 DESCCHANGED = 1 << 0 # action changed the description
351 DESCCHANGED = 1 << 0 # action changed the description
352 METACHANGED = 1 << 1 # action change the meta
352 METACHANGED = 1 << 1 # action change the meta
353 DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
353 DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
354 PARENTCHANGED = 1 << 2 # action change the parent
354 PARENTCHANGED = 1 << 2 # action change the parent
355 USERCHANGED = 1 << 4 # the user changed
355 USERCHANGED = 1 << 4 # the user changed
356 DATECHANGED = 1 << 5 # the date changed
356 DATECHANGED = 1 << 5 # the date changed
357 BRANCHCHANGED = 1 << 6 # the branch changed
357 BRANCHCHANGED = 1 << 6 # the branch changed
358
358
359 METABLACKLIST = [
359 METABLACKLIST = [
360 re.compile('^branch$'),
360 re.compile('^branch$'),
361 re.compile('^.*-source$'),
361 re.compile('^.*-source$'),
362 re.compile('^.*_source$'),
362 re.compile('^.*_source$'),
363 re.compile('^source$'),
363 re.compile('^source$'),
364 ]
364 ]
365
365
366 def metanotblacklisted(metaitem):
366 def metanotblacklisted(metaitem):
367 """ Check that the key of a meta item (extrakey, extravalue) does not
367 """ Check that the key of a meta item (extrakey, extravalue) does not
368 match at least one of the blacklist pattern
368 match at least one of the blacklist pattern
369 """
369 """
370 metakey = metaitem[0]
370 metakey = metaitem[0]
371
371
372 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
372 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
373
373
374 def _prepare_hunk(hunk):
374 def _prepare_hunk(hunk):
375 """Drop all information but the username and patch"""
375 """Drop all information but the username and patch"""
376 cleanhunk = []
376 cleanhunk = []
377 for line in hunk.splitlines():
377 for line in hunk.splitlines():
378 if line.startswith(b'# User') or not line.startswith(b'#'):
378 if line.startswith(b'# User') or not line.startswith(b'#'):
379 if line.startswith(b'@@'):
379 if line.startswith(b'@@'):
380 line = b'@@\n'
380 line = b'@@\n'
381 cleanhunk.append(line)
381 cleanhunk.append(line)
382 return cleanhunk
382 return cleanhunk
383
383
384 def _getdifflines(iterdiff):
384 def _getdifflines(iterdiff):
385 """return a cleaned up lines"""
385 """return a cleaned up lines"""
386 lines = next(iterdiff, None)
386 lines = next(iterdiff, None)
387
387
388 if lines is None:
388 if lines is None:
389 return lines
389 return lines
390
390
391 return _prepare_hunk(lines)
391 return _prepare_hunk(lines)
392
392
393 def _cmpdiff(leftctx, rightctx):
393 def _cmpdiff(leftctx, rightctx):
394 """return True if both ctx introduce the "same diff"
394 """return True if both ctx introduce the "same diff"
395
395
396 This is a first and basic implementation, with many shortcomings.
396 This is a first and basic implementation, with many shortcomings.
397 """
397 """
398 diffopts = diffutil.diffopts(leftctx.repo().ui, {'git': True})
398 diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
399 # leftctx or rightctx might be filtered, so we need to use the contexts
399 # leftctx or rightctx might be filtered, so we need to use the contexts
400 # with an unfiltered repository to safely compute the diff
400 # with an unfiltered repository to safely compute the diff
401 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
401 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
402 leftdiff = leftunfi.diff(opts=diffopts)
402 leftdiff = leftunfi.diff(opts=diffopts)
403 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
403 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
404 rightdiff = rightunfi.diff(opts=diffopts)
404 rightdiff = rightunfi.diff(opts=diffopts)
405
405
406 left, right = (0, 0)
406 left, right = (0, 0)
407 while None not in (left, right):
407 while None not in (left, right):
408 left = _getdifflines(leftdiff)
408 left = _getdifflines(leftdiff)
409 right = _getdifflines(rightdiff)
409 right = _getdifflines(rightdiff)
410
410
411 if left != right:
411 if left != right:
412 return False
412 return False
413 return True
413 return True
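
# Editor's note: the loop above walks both diffs in lockstep and returns
# False at the first differing hunk; once both iterators are exhausted (left
# and right both None), the while condition fails and the diffs are equal. A
# self-contained sketch of the same pattern over plain iterators:
#
#   def lockstepequal(left_iter, right_iter):
#       left = right = 0  # sentinels so the loop is entered
#       while None not in (left, right):
#           left = next(left_iter, None)
#           right = next(right_iter, None)
#           if left != right:
#               return False
#       return True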
414
414
415 def geteffectflag(relation):
415 def geteffectflag(relation):
416 """ From an obs-marker relation, compute what changed between the
416 """ From an obs-marker relation, compute what changed between the
417 predecessor and the successor.
417 predecessor and the successor.
418 """
418 """
419 effects = 0
419 effects = 0
420
420
421 source = relation[0]
421 source = relation[0]
422
422
423 for changectx in relation[1]:
423 for changectx in relation[1]:
424 # Check if description has changed
424 # Check if description has changed
425 if changectx.description() != source.description():
425 if changectx.description() != source.description():
426 effects |= DESCCHANGED
426 effects |= DESCCHANGED
427
427
428 # Check if user has changed
428 # Check if user has changed
429 if changectx.user() != source.user():
429 if changectx.user() != source.user():
430 effects |= USERCHANGED
430 effects |= USERCHANGED
431
431
432 # Check if date has changed
432 # Check if date has changed
433 if changectx.date() != source.date():
433 if changectx.date() != source.date():
434 effects |= DATECHANGED
434 effects |= DATECHANGED
435
435
436 # Check if branch has changed
436 # Check if branch has changed
437 if changectx.branch() != source.branch():
437 if changectx.branch() != source.branch():
438 effects |= BRANCHCHANGED
438 effects |= BRANCHCHANGED
439
439
440 # Check if at least one of the parents has changed
440 # Check if at least one of the parents has changed
441 if changectx.parents() != source.parents():
441 if changectx.parents() != source.parents():
442 effects |= PARENTCHANGED
442 effects |= PARENTCHANGED
443
443
444 # Check if other meta has changed
444 # Check if other meta has changed
445 changeextra = changectx.extra().items()
445 changeextra = changectx.extra().items()
446 ctxmeta = list(filter(metanotblacklisted, changeextra))
446 ctxmeta = list(filter(metanotblacklisted, changeextra))
447
447
448 sourceextra = source.extra().items()
448 sourceextra = source.extra().items()
449 srcmeta = list(filter(metanotblacklisted, sourceextra))
449 srcmeta = list(filter(metanotblacklisted, sourceextra))
450
450
451 if ctxmeta != srcmeta:
451 if ctxmeta != srcmeta:
452 effects |= METACHANGED
452 effects |= METACHANGED
453
453
454 # Check if the diff has changed
454 # Check if the diff has changed
455 if not _cmpdiff(source, changectx):
455 if not _cmpdiff(source, changectx):
456 effects |= DIFFCHANGED
456 effects |= DIFFCHANGED
457
457
458 return effects
458 return effects
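
# Editor's sketch (hypothetical relation): for a relation (pred, (succ,))
# where the rewrite only changed the commit message and the author,
# geteffectflag() returns DESCCHANGED | USERCHANGED; a rewrite touching only
# non-blacklisted extras returns METACHANGED.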
459
459
460 def getobsoleted(repo, tr):
460 def getobsoleted(repo, tr):
461 """return the set of pre-existing revisions obsoleted by a transaction"""
461 """return the set of pre-existing revisions obsoleted by a transaction"""
462 torev = repo.unfiltered().changelog.nodemap.get
462 torev = repo.unfiltered().changelog.nodemap.get
463 phase = repo._phasecache.phase
463 phase = repo._phasecache.phase
464 succsmarkers = repo.obsstore.successors.get
464 succsmarkers = repo.obsstore.successors.get
465 public = phases.public
465 public = phases.public
466 addedmarkers = tr.changes.get('obsmarkers')
466 addedmarkers = tr.changes.get('obsmarkers')
467 addedrevs = tr.changes.get('revs')
467 addedrevs = tr.changes.get('revs')
468 seenrevs = set()
468 seenrevs = set()
469 obsoleted = set()
469 obsoleted = set()
470 for mark in addedmarkers:
470 for mark in addedmarkers:
471 node = mark[0]
471 node = mark[0]
472 rev = torev(node)
472 rev = torev(node)
473 if rev is None or rev in seenrevs or rev in addedrevs:
473 if rev is None or rev in seenrevs or rev in addedrevs:
474 continue
474 continue
475 seenrevs.add(rev)
475 seenrevs.add(rev)
476 if phase(repo, rev) == public:
476 if phase(repo, rev) == public:
477 continue
477 continue
478 if set(succsmarkers(node) or []).issubset(addedmarkers):
478 if set(succsmarkers(node) or []).issubset(addedmarkers):
479 obsoleted.add(rev)
479 obsoleted.add(rev)
480 return obsoleted
480 return obsoleted
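
# Editor's note: a revision counts as newly obsoleted when it existed before
# the transaction (not in tr.changes['revs']), is not public, and every
# marker using it as a predecessor was added by this very transaction.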
481
481
482 class _succs(list):
482 class _succs(list):
483 """small class to represent a successors with some metadata about it"""
483 """small class to represent a successors with some metadata about it"""
484
484
485 def __init__(self, *args, **kwargs):
485 def __init__(self, *args, **kwargs):
486 super(_succs, self).__init__(*args, **kwargs)
486 super(_succs, self).__init__(*args, **kwargs)
487 self.markers = set()
487 self.markers = set()
488
488
489 def copy(self):
489 def copy(self):
490 new = _succs(self)
490 new = _succs(self)
491 new.markers = self.markers.copy()
491 new.markers = self.markers.copy()
492 return new
492 return new
493
493
494 @util.propertycache
494 @util.propertycache
495 def _set(self):
495 def _set(self):
496 # immutable
496 # immutable
497 return set(self)
497 return set(self)
498
498
499 def canmerge(self, other):
499 def canmerge(self, other):
500 return self._set.issubset(other._set)
500 return self._set.issubset(other._set)
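
# Editor's sketch (hypothetical nodes): a _succs is an ordered list of
# successor nodes plus the set of markers that produced it.
#
#   a = _succs(['n1', 'n2'])
#   b = _succs(['n1', 'n2', 'n3'])
#   a.canmerge(b)   # True: a's nodes are a subset of b's
#   b.canmerge(a)   # False
#
# Note that _set is a propertycache: it should only be read once the list
# content is final, since later mutations are not reflected in the cached
# set.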
501
501
502 def successorssets(repo, initialnode, closest=False, cache=None):
502 def successorssets(repo, initialnode, closest=False, cache=None):
503 """Return set of all latest successors of initial nodes
503 """Return set of all latest successors of initial nodes
504
504
505 The successors set of a changeset A is the group of revisions that succeed
505 The successors set of a changeset A is the group of revisions that succeed
506 A. It succeeds A as a consistent whole, each revision being only a partial
506 A. It succeeds A as a consistent whole, each revision being only a partial
507 replacement. By default, the successors set contains non-obsolete
507 replacement. By default, the successors set contains non-obsolete
508 changesets only, walking the obsolescence graph until reaching a leaf. If
508 changesets only, walking the obsolescence graph until reaching a leaf. If
509 'closest' is set to True, the closest successors-sets are returned (the
509 'closest' is set to True, the closest successors-sets are returned (the
510 obsolescence walk stops on known changesets).
510 obsolescence walk stops on known changesets).
511
511
512 This function returns the full list of successor sets which is why it
512 This function returns the full list of successor sets which is why it
513 returns a list of tuples and not just a single tuple. Each tuple is a valid
513 returns a list of tuples and not just a single tuple. Each tuple is a valid
514 successors set. Note that (A,) may be a valid successors set for changeset A
514 successors set. Note that (A,) may be a valid successors set for changeset A
515 (see below).
515 (see below).
516
516
517 In most cases, a changeset A will have a single element (e.g. the changeset
517 In most cases, a changeset A will have a single element (e.g. the changeset
518 A is replaced by A') in its successors set. However, it is also common for a
518 A is replaced by A') in its successors set. However, it is also common for a
519 changeset A to have no elements in its successor set (e.g. the changeset
519 changeset A to have no elements in its successor set (e.g. the changeset
520 has been pruned). Therefore, the returned list of successors sets will be
520 has been pruned). Therefore, the returned list of successors sets will be
521 [(A',)] or [], respectively.
521 [(A',)] or [], respectively.
522
522
523 When a changeset A is split into A' and B', however, it will result in a
523 When a changeset A is split into A' and B', however, it will result in a
524 successors set containing more than a single element, i.e. [(A',B')].
524 successors set containing more than a single element, i.e. [(A',B')].
525 Divergent changesets will result in multiple successors sets, i.e. [(A',),
525 Divergent changesets will result in multiple successors sets, i.e. [(A',),
526 (A'')].
526 (A'')].
527
527
528 If a changeset A is not obsolete, then it will conceptually have no
528 If a changeset A is not obsolete, then it will conceptually have no
529 successors set. To distinguish this from a pruned changeset, the successor
529 successors set. To distinguish this from a pruned changeset, the successor
530 set will contain itself only, i.e. [(A,)].
530 set will contain itself only, i.e. [(A,)].
531
531
532 Finally, final successors unknown locally are considered to be pruned
532 Finally, final successors unknown locally are considered to be pruned
533 (pruned: obsoleted without any successors). (Final: successors not affected
533 (pruned: obsoleted without any successors). (Final: successors not affected
534 by markers).
534 by markers).
535
535
536 The 'closest' mode respects the repoview filtering. For example, without
536 The 'closest' mode respects the repoview filtering. For example, without
537 any filter it will stop at the first locally known changeset; with the
537 any filter it will stop at the first locally known changeset; with the
538 'visible' filter it will stop on visible changesets.
538 'visible' filter it will stop on visible changesets.
539
539
540 The optional `cache` parameter is a dictionary that may contain
540 The optional `cache` parameter is a dictionary that may contain
541 precomputed successors sets. It is meant to reuse the computation of a
541 precomputed successors sets. It is meant to reuse the computation of a
542 previous call to `successorssets` when multiple calls are made at the same
542 previous call to `successorssets` when multiple calls are made at the same
543 time. The cache dictionary is updated in place. The caller is responsible
543 time. The cache dictionary is updated in place. The caller is responsible
544 for its life span. Code that makes multiple calls to `successorssets`
544 for its life span. Code that makes multiple calls to `successorssets`
545 *should* use this cache mechanism or risk a performance hit.
545 *should* use this cache mechanism or risk a performance hit.
546
546
547 Since results differ depending on the 'closest' mode, the same cache
547 Since results differ depending on the 'closest' mode, the same cache
548 cannot be reused for both modes.
548 cannot be reused for both modes.
549 """
549 """
550
550
551 succmarkers = repo.obsstore.successors
551 succmarkers = repo.obsstore.successors
552
552
553 # Stack of nodes we search successors sets for
553 # Stack of nodes we search successors sets for
554 toproceed = [initialnode]
554 toproceed = [initialnode]
555 # set version of above list for fast loop detection
555 # set version of above list for fast loop detection
556 # element added to "toproceed" must be added here
556 # element added to "toproceed" must be added here
557 stackedset = set(toproceed)
557 stackedset = set(toproceed)
558 if cache is None:
558 if cache is None:
559 cache = {}
559 cache = {}
560
560
561 # This while loop is the flattened version of a recursive search for
561 # This while loop is the flattened version of a recursive search for
562 # successors sets
562 # successors sets
563 #
563 #
564 # def successorssets(x):
564 # def successorssets(x):
565 # successors = directsuccessors(x)
565 # successors = directsuccessors(x)
566 # ss = [[]]
566 # ss = [[]]
567 # for succ in directsuccessors(x):
567 # for succ in directsuccessors(x):
568 # # product as in itertools cartesian product
568 # # product as in itertools cartesian product
569 # ss = product(ss, successorssets(succ))
569 # ss = product(ss, successorssets(succ))
570 # return ss
570 # return ss
571 #
571 #
572 # But we can not use plain recursive calls here:
572 # But we can not use plain recursive calls here:
573 # - that would blow the python call stack
573 # - that would blow the python call stack
574 # - obsolescence markers may have cycles, we need to handle them.
574 # - obsolescence markers may have cycles, we need to handle them.
575 #
575 #
576 # The `toproceed` list acts as our call stack. Every node we search
576 # The `toproceed` list acts as our call stack. Every node we search
577 # successors sets for is stacked there.
577 # successors sets for is stacked there.
578 #
578 #
579 # The `stackedset` is a set version of this stack, used to check whether a
579 # The `stackedset` is a set version of this stack, used to check whether a
580 # node is already stacked. This check is used to detect cycles and prevent
580 # node is already stacked. This check is used to detect cycles and prevent
581 # infinite loops.
581 # infinite loops.
582 #
582 #
583 # The successors sets of all nodes are stored in the `cache` dictionary.
583 # The successors sets of all nodes are stored in the `cache` dictionary.
584 #
584 #
585 # After this while loop ends we use the cache to return the successors sets
585 # After this while loop ends we use the cache to return the successors sets
586 # for the node requested by the caller.
586 # for the node requested by the caller.
587 while toproceed:
587 while toproceed:
588 # Every iteration tries to compute the successors sets of the topmost
588 # Every iteration tries to compute the successors sets of the topmost
589 # node of the stack: CURRENT.
589 # node of the stack: CURRENT.
590 #
590 #
591 # There are four possible outcomes:
591 # There are four possible outcomes:
592 #
592 #
593 # 1) We already know the successors sets of CURRENT:
593 # 1) We already know the successors sets of CURRENT:
594 # -> mission accomplished, pop it from the stack.
594 # -> mission accomplished, pop it from the stack.
595 # 2) Stop the walk:
595 # 2) Stop the walk:
596 # default case: Node is not obsolete
596 # default case: Node is not obsolete
597 # closest case: Node is known at this repo filter level
597 # closest case: Node is known at this repo filter level
598 # -> the node is its own successors sets. Add it to the cache.
598 # -> the node is its own successors sets. Add it to the cache.
599 # 3) We do not know successors set of direct successors of CURRENT:
599 # 3) We do not know successors set of direct successors of CURRENT:
600 # -> We add those successors to the stack.
600 # -> We add those successors to the stack.
601 # 4) We know successors sets of all direct successors of CURRENT:
601 # 4) We know successors sets of all direct successors of CURRENT:
602 # -> We can compute CURRENT successors set and add it to the
602 # -> We can compute CURRENT successors set and add it to the
603 # cache.
603 # cache.
604 #
604 #
605 current = toproceed[-1]
605 current = toproceed[-1]
606
606
607 # case 2 condition is a bit hairy because of closest,
607 # case 2 condition is a bit hairy because of closest,
608 # we compute it on its own
608 # we compute it on its own
609 case2condition = ((current not in succmarkers)
609 case2condition = ((current not in succmarkers)
610 or (closest and current != initialnode
610 or (closest and current != initialnode
611 and current in repo))
611 and current in repo))
612
612
613 if current in cache:
613 if current in cache:
614 # case (1): We already know the successors sets
614 # case (1): We already know the successors sets
615 stackedset.remove(toproceed.pop())
615 stackedset.remove(toproceed.pop())
616 elif case2condition:
616 elif case2condition:
617 # case (2): end of walk.
617 # case (2): end of walk.
618 if current in repo:
618 if current in repo:
619 # We have a valid successor.
619 # We have a valid successor.
620 cache[current] = [_succs((current,))]
620 cache[current] = [_succs((current,))]
621 else:
621 else:
622 # Final obsolete version is unknown locally.
622 # Final obsolete version is unknown locally.
623 # Do not count that as a valid successor
623 # Do not count that as a valid successor
624 cache[current] = []
624 cache[current] = []
625 else:
625 else:
626 # cases (3) and (4)
626 # cases (3) and (4)
627 #
627 #
628 # We proceed in two phases. Phase 1 aims to distinguish case (3)
628 # We proceed in two phases. Phase 1 aims to distinguish case (3)
629 # from case (4):
629 # from case (4):
630 #
630 #
631 # For each direct successors of CURRENT, we check whether its
631 # For each direct successors of CURRENT, we check whether its
632 # successors sets are known. If they are not, we stack the
632 # successors sets are known. If they are not, we stack the
633 # unknown node and proceed to the next iteration of the while
633 # unknown node and proceed to the next iteration of the while
634 # loop. (case 3)
634 # loop. (case 3)
635 #
635 #
636 # During this step, we may detect obsolescence cycles: a node
636 # During this step, we may detect obsolescence cycles: a node
637 # with unknown successors sets but already in the call stack.
637 # with unknown successors sets but already in the call stack.
638 # In such a situation, we arbitrarily set the successors sets of
638 # In such a situation, we arbitrarily set the successors sets of
639 # the node to nothing (node pruned) to break the cycle.
639 # the node to nothing (node pruned) to break the cycle.
640 #
640 #
641 # If no break was encountered we proceed to phase 2.
641 # If no break was encountered we proceed to phase 2.
642 #
642 #
643 # Phase 2 computes successors sets of CURRENT (case 4); see details
643 # Phase 2 computes successors sets of CURRENT (case 4); see details
644 # in phase 2 itself.
644 # in phase 2 itself.
645 #
645 #
646 # Note the two levels of iteration in each phase.
646 # Note the two levels of iteration in each phase.
647 # - The first one handles obsolescence markers using CURRENT as
647 # - The first one handles obsolescence markers using CURRENT as
648 # precursor (successors markers of CURRENT).
648 # precursor (successors markers of CURRENT).
649 #
649 #
650 # Having multiple entries here means divergence.
650 # Having multiple entries here means divergence.
651 #
651 #
652 # - The second one handles successors defined in each marker.
652 # - The second one handles successors defined in each marker.
653 #
653 #
654 # Having none means a pruned node, multiple successors mean a split, and
654 # Having none means a pruned node, multiple successors mean a split, and
655 # a single successor is a standard replacement.
655 # a single successor is a standard replacement.
656 #
656 #
657 for mark in sorted(succmarkers[current]):
657 for mark in sorted(succmarkers[current]):
658 for suc in mark[1]:
658 for suc in mark[1]:
659 if suc not in cache:
659 if suc not in cache:
660 if suc in stackedset:
660 if suc in stackedset:
661 # cycle breaking
661 # cycle breaking
662 cache[suc] = []
662 cache[suc] = []
663 else:
663 else:
664 # case (3) If we have not computed successors sets
664 # case (3) If we have not computed successors sets
665 # of one of those successors we add it to the
665 # of one of those successors we add it to the
666 # `toproceed` stack and stop all work for this
666 # `toproceed` stack and stop all work for this
667 # iteration.
667 # iteration.
668 toproceed.append(suc)
668 toproceed.append(suc)
669 stackedset.add(suc)
669 stackedset.add(suc)
670 break
670 break
671 else:
671 else:
672 continue
672 continue
673 break
673 break
674 else:
674 else:
675 # case (4): we know all successors sets of all direct
675 # case (4): we know all successors sets of all direct
676 # successors
676 # successors
677 #
677 #
678 # The successors set contributed by each marker depends on the
678 # The successors set contributed by each marker depends on the
679 # successors sets of all its "successors" nodes.
679 # successors sets of all its "successors" nodes.
680 #
680 #
681 # Each different marker is a divergence in the obsolescence
681 # Each different marker is a divergence in the obsolescence
682 # history. It contributes successors sets distinct from other
682 # history. It contributes successors sets distinct from other
683 # markers.
683 # markers.
684 #
684 #
685 # Within a marker, a successor may have divergent successors
685 # Within a marker, a successor may have divergent successors
686 # sets. In such a case, the marker will contribute multiple
686 # sets. In such a case, the marker will contribute multiple
687 # divergent successors sets. If multiple successors have
687 # divergent successors sets. If multiple successors have
688 # divergent successors sets, a Cartesian product is used.
688 # divergent successors sets, a Cartesian product is used.
689 #
689 #
690 # At the end we post-process successors sets to remove
690 # At the end we post-process successors sets to remove
691 # duplicated entry and successors set that are strict subset of
691 # duplicated entry and successors set that are strict subset of
692 # another one.
692 # another one.
693 succssets = []
693 succssets = []
694 for mark in sorted(succmarkers[current]):
694 for mark in sorted(succmarkers[current]):
695 # successors sets contributed by this marker
695 # successors sets contributed by this marker
696 base = _succs()
696 base = _succs()
697 base.markers.add(mark)
697 base.markers.add(mark)
698 markss = [base]
698 markss = [base]
699 for suc in mark[1]:
699 for suc in mark[1]:
700 # cartesian product with previous successors
700 # cartesian product with previous successors
701 productresult = []
701 productresult = []
702 for prefix in markss:
702 for prefix in markss:
703 for suffix in cache[suc]:
703 for suffix in cache[suc]:
704 newss = prefix.copy()
704 newss = prefix.copy()
705 newss.markers.update(suffix.markers)
705 newss.markers.update(suffix.markers)
706 for part in suffix:
706 for part in suffix:
707 # do not duplicate entries in the successors set;
707 # do not duplicate entries in the successors set;
708 # the first entry wins.
708 # the first entry wins.
709 if part not in newss:
709 if part not in newss:
710 newss.append(part)
710 newss.append(part)
711 productresult.append(newss)
711 productresult.append(newss)
712 markss = productresult
712 markss = productresult
713 succssets.extend(markss)
713 succssets.extend(markss)
714 # remove duplicated and subset
714 # remove duplicated and subset
715 seen = []
715 seen = []
716 final = []
716 final = []
717 candidates = sorted((s for s in succssets if s),
717 candidates = sorted((s for s in succssets if s),
718 key=len, reverse=True)
718 key=len, reverse=True)
719 for cand in candidates:
719 for cand in candidates:
720 for seensuccs in seen:
720 for seensuccs in seen:
721 if cand.canmerge(seensuccs):
721 if cand.canmerge(seensuccs):
722 seensuccs.markers.update(cand.markers)
722 seensuccs.markers.update(cand.markers)
723 break
723 break
724 else:
724 else:
725 final.append(cand)
725 final.append(cand)
726 seen.append(cand)
726 seen.append(cand)
727 final.reverse() # put small successors set first
727 final.reverse() # put small successors set first
728 cache[current] = final
728 cache[current] = final
729 return cache[initialnode]
729 return cache[initialnode]
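
# Editor's sketch (hypothetical history): if A was amended into A' and A' was
# then split into B and C, successorssets(repo, A) returns [[B, C]]. If A was
# instead rewritten both as A1 and as A2 (divergence), it returns
# [[A1], [A2]]. If A was pruned it returns [], and if A is not obsolete at
# all it returns [[A]].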
730
730
731 def successorsandmarkers(repo, ctx):
731 def successorsandmarkers(repo, ctx):
732 """compute the raw data needed for computing obsfate
732 """compute the raw data needed for computing obsfate
733 Returns a list of dicts, one dict per successors set
733 Returns a list of dicts, one dict per successors set
734 """
734 """
735 if not ctx.obsolete():
735 if not ctx.obsolete():
736 return None
736 return None
737
737
738 ssets = successorssets(repo, ctx.node(), closest=True)
738 ssets = successorssets(repo, ctx.node(), closest=True)
739
739
740 # successorssets with closest=True returns an empty list for pruned
740 # successorssets with closest=True returns an empty list for pruned
741 # revisions; remap it into a list containing an empty list for processing
741 # revisions; remap it into a list containing an empty list for processing
742 if ssets == []:
742 if ssets == []:
743 ssets = [[]]
743 ssets = [[]]
744
744
745 # Try to recover pruned markers
745 # Try to recover pruned markers
746 succsmap = repo.obsstore.successors
746 succsmap = repo.obsstore.successors
747 fullsuccessorsets = [] # successor set + markers
747 fullsuccessorsets = [] # successor set + markers
748 for sset in ssets:
748 for sset in ssets:
749 if sset:
749 if sset:
750 fullsuccessorsets.append(sset)
750 fullsuccessorsets.append(sset)
751 else:
751 else:
752 # successorssets returns an empty set when ctx or one of its
752 # successorssets returns an empty set when ctx or one of its
753 # successors is pruned.
753 # successors is pruned.
754 # In this case, walk the obs-markers tree again starting with ctx
754 # In this case, walk the obs-markers tree again starting with ctx
755 # and find the relevant pruning obs-markers, the ones without
755 # and find the relevant pruning obs-markers, the ones without
756 # successors.
756 # successors.
757 # Having these markers allows us to compute some information about
757 # Having these markers allows us to compute some information about
758 # its fate, like who pruned this changeset and when.
758 # its fate, like who pruned this changeset and when.
759
759
760 # XXX we do not catch all prune markers (eg rewritten then pruned)
760 # XXX we do not catch all prune markers (eg rewritten then pruned)
761 # (fix me later)
761 # (fix me later)
762 foundany = False
762 foundany = False
763 for mark in succsmap.get(ctx.node(), ()):
763 for mark in succsmap.get(ctx.node(), ()):
764 if not mark[1]:
764 if not mark[1]:
765 foundany = True
765 foundany = True
766 sset = _succs()
766 sset = _succs()
767 sset.markers.add(mark)
767 sset.markers.add(mark)
768 fullsuccessorsets.append(sset)
768 fullsuccessorsets.append(sset)
769 if not foundany:
769 if not foundany:
770 fullsuccessorsets.append(_succs())
770 fullsuccessorsets.append(_succs())
771
771
772 values = []
772 values = []
773 for sset in fullsuccessorsets:
773 for sset in fullsuccessorsets:
774 values.append({'successors': sset, 'markers': sset.markers})
774 values.append({'successors': sset, 'markers': sset.markers})
775
775
776 return values
776 return values
777
777
778 def _getobsfate(successorssets):
778 def _getobsfate(successorssets):
779 """ Compute a changeset obsolescence fate based on its successorssets.
779 """ Compute a changeset obsolescence fate based on its successorssets.
780 Successors can be the tipmost ones or the immediate ones. This function's
780 Successors can be the tipmost ones or the immediate ones. This function's
781 return values are not meant to be shown directly to users; they are meant
781 return values are not meant to be shown directly to users; they are meant
782 to be used by internal functions only.
782 to be used by internal functions only.
783 Returns one fate from the following values:
783 Returns one fate from the following values:
784 - pruned
784 - pruned
785 - diverged
785 - diverged
786 - superseded
786 - superseded
787 - superseded_split
787 - superseded_split
788 """
788 """
789
789
790 if len(successorssets) == 0:
790 if len(successorssets) == 0:
791 # The commit has been pruned
791 # The commit has been pruned
792 return 'pruned'
792 return 'pruned'
793 elif len(successorssets) > 1:
793 elif len(successorssets) > 1:
794 return 'diverged'
794 return 'diverged'
795 else:
795 else:
796 # No divergence, only one set of successors
796 # No divergence, only one set of successors
797 successors = successorssets[0]
797 successors = successorssets[0]
798
798
799 if len(successors) == 1:
799 if len(successors) == 1:
800 return 'superseded'
800 return 'superseded'
801 else:
801 else:
802 return 'superseded_split'
802 return 'superseded_split'
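
# Editor's sketch: _getobsfate() only inspects the shape of its argument
# (hypothetical node names shown as strings):
#
#   _getobsfate([])                  # -> 'pruned'
#   _getobsfate([['n1'], ['n2']])    # -> 'diverged'
#   _getobsfate([['n1']])            # -> 'superseded'
#   _getobsfate([['n1', 'n2']])      # -> 'superseded_split'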
803
803
804 def obsfateverb(successorset, markers):
804 def obsfateverb(successorset, markers):
805 """ Return the verb summarizing the successorset and potentially using
805 """ Return the verb summarizing the successorset and potentially using
806 information from the markers
806 information from the markers
807 """
807 """
808 if not successorset:
808 if not successorset:
809 verb = 'pruned'
809 verb = 'pruned'
810 elif len(successorset) == 1:
810 elif len(successorset) == 1:
811 verb = 'rewritten'
811 verb = 'rewritten'
812 else:
812 else:
813 verb = 'split'
813 verb = 'split'
814 return verb
814 return verb
815
815
816 def markersdates(markers):
816 def markersdates(markers):
817 """returns the list of dates for a list of markers
817 """returns the list of dates for a list of markers
818 """
818 """
819 return [m[4] for m in markers]
819 return [m[4] for m in markers]
820
820
821 def markersusers(markers):
821 def markersusers(markers):
822 """ Returns a sorted list of markers users without duplicates
822 """ Returns a sorted list of markers users without duplicates
823 """
823 """
824 markersmeta = [dict(m[3]) for m in markers]
824 markersmeta = [dict(m[3]) for m in markers]
825 users = set(meta.get('user') for meta in markersmeta if meta.get('user'))
825 users = set(meta.get('user') for meta in markersmeta if meta.get('user'))
826
826
827 return sorted(users)
827 return sorted(users)
828
828
829 def markersoperations(markers):
829 def markersoperations(markers):
830 """ Returns a sorted list of markers operations without duplicates
830 """ Returns a sorted list of markers operations without duplicates
831 """
831 """
832 markersmeta = [dict(m[3]) for m in markers]
832 markersmeta = [dict(m[3]) for m in markers]
833 operations = set(meta.get('operation') for meta in markersmeta
833 operations = set(meta.get('operation') for meta in markersmeta
834 if meta.get('operation'))
834 if meta.get('operation'))
835
835
836 return sorted(operations)
836 return sorted(operations)
837
837
838 def obsfateprinter(ui, repo, successors, markers, formatctx):
838 def obsfateprinter(ui, repo, successors, markers, formatctx):
839 """ Build a obsfate string for a single successorset using all obsfate
839 """ Build a obsfate string for a single successorset using all obsfate
840 related function defined in obsutil
840 related function defined in obsutil
841 """
841 """
842 quiet = ui.quiet
842 quiet = ui.quiet
843 verbose = ui.verbose
843 verbose = ui.verbose
844 normal = not verbose and not quiet
844 normal = not verbose and not quiet
845
845
846 line = []
846 line = []
847
847
848 # Verb
848 # Verb
849 line.append(obsfateverb(successors, markers))
849 line.append(obsfateverb(successors, markers))
850
850
851 # Operations
851 # Operations
852 operations = markersoperations(markers)
852 operations = markersoperations(markers)
853 if operations:
853 if operations:
854 line.append(" using %s" % ", ".join(operations))
854 line.append(" using %s" % ", ".join(operations))
855
855
856 # Successors
856 # Successors
857 if successors:
857 if successors:
858 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
858 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
859 line.append(" as %s" % ", ".join(fmtsuccessors))
859 line.append(" as %s" % ", ".join(fmtsuccessors))
860
860
861 # Users
861 # Users
862 users = markersusers(markers)
862 users = markersusers(markers)
863 # Filter out the current user in non-verbose mode to reduce the amount of
863 # Filter out the current user in non-verbose mode to reduce the amount of
864 # information
864 # information
865 if not verbose:
865 if not verbose:
866 currentuser = ui.username(acceptempty=True)
866 currentuser = ui.username(acceptempty=True)
867 if len(users) == 1 and currentuser in users:
867 if len(users) == 1 and currentuser in users:
868 users = None
868 users = None
869
869
870 if (verbose or normal) and users:
870 if (verbose or normal) and users:
871 line.append(" by %s" % ", ".join(users))
871 line.append(" by %s" % ", ".join(users))
872
872
873 # Date
873 # Date
874 dates = markersdates(markers)
874 dates = markersdates(markers)
875
875
876 if dates and verbose:
876 if dates and verbose:
877 min_date = min(dates)
877 min_date = min(dates)
878 max_date = max(dates)
878 max_date = max(dates)
879
879
880 if min_date == max_date:
880 if min_date == max_date:
881 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
881 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
882 line.append(" (at %s)" % fmtmin_date)
882 line.append(" (at %s)" % fmtmin_date)
883 else:
883 else:
884 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
884 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
885 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
885 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
886 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
886 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
887
887
888 return "".join(line)
888 return "".join(line)
889
889
890
890
891 filteredmsgtable = {
891 filteredmsgtable = {
892 "pruned": _("hidden revision '%s' is pruned"),
892 "pruned": _("hidden revision '%s' is pruned"),
893 "diverged": _("hidden revision '%s' has diverged"),
893 "diverged": _("hidden revision '%s' has diverged"),
894 "superseded": _("hidden revision '%s' was rewritten as: %s"),
894 "superseded": _("hidden revision '%s' was rewritten as: %s"),
895 "superseded_split": _("hidden revision '%s' was split as: %s"),
895 "superseded_split": _("hidden revision '%s' was split as: %s"),
896 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
896 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
897 "%d more"),
897 "%d more"),
898 }
898 }
899
899
900 def _getfilteredreason(repo, changeid, ctx):
900 def _getfilteredreason(repo, changeid, ctx):
901 """return a human-friendly string on why a obsolete changeset is hidden
901 """return a human-friendly string on why a obsolete changeset is hidden
902 """
902 """
903 successors = successorssets(repo, ctx.node())
903 successors = successorssets(repo, ctx.node())
904 fate = _getobsfate(successors)
904 fate = _getobsfate(successors)
905
905
906 # Be more precise in case the revision is superseded
906 # Be more precise in case the revision is superseded
907 if fate == 'pruned':
907 if fate == 'pruned':
908 return filteredmsgtable['pruned'] % changeid
908 return filteredmsgtable['pruned'] % changeid
909 elif fate == 'diverged':
909 elif fate == 'diverged':
910 return filteredmsgtable['diverged'] % changeid
910 return filteredmsgtable['diverged'] % changeid
911 elif fate == 'superseded':
911 elif fate == 'superseded':
912 single_successor = nodemod.short(successors[0][0])
912 single_successor = nodemod.short(successors[0][0])
913 return filteredmsgtable['superseded'] % (changeid, single_successor)
913 return filteredmsgtable['superseded'] % (changeid, single_successor)
914 elif fate == 'superseded_split':
914 elif fate == 'superseded_split':
915
915
916 succs = []
916 succs = []
917 for node_id in successors[0]:
917 for node_id in successors[0]:
918 succs.append(nodemod.short(node_id))
918 succs.append(nodemod.short(node_id))
919
919
920 if len(succs) <= 2:
920 if len(succs) <= 2:
921 fmtsuccs = ', '.join(succs)
921 fmtsuccs = ', '.join(succs)
922 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
922 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
923 else:
923 else:
924 firstsuccessors = ', '.join(succs[:2])
924 firstsuccessors = ', '.join(succs[:2])
925 remainingnumber = len(succs) - 2
925 remainingnumber = len(succs) - 2
926
926
927 args = (changeid, firstsuccessors, remainingnumber)
927 args = (changeid, firstsuccessors, remainingnumber)
928 return filteredmsgtable['superseded_split_several'] % args
928 return filteredmsgtable['superseded_split_several'] % args
929
929
930 def divergentsets(repo, ctx):
930 def divergentsets(repo, ctx):
931 """Compute sets of commits divergent with a given one"""
931 """Compute sets of commits divergent with a given one"""
932 cache = {}
932 cache = {}
933 base = {}
933 base = {}
934 for n in allpredecessors(repo.obsstore, [ctx.node()]):
934 for n in allpredecessors(repo.obsstore, [ctx.node()]):
935 if n == ctx.node():
935 if n == ctx.node():
936 # a node can't be a base for divergence with itself
936 # a node can't be a base for divergence with itself
937 continue
937 continue
938 nsuccsets = successorssets(repo, n, cache=cache)
938 nsuccsets = successorssets(repo, n, cache=cache)
939 for nsuccset in nsuccsets:
939 for nsuccset in nsuccsets:
940 if ctx.node() in nsuccset:
940 if ctx.node() in nsuccset:
941 # we are only interested in *other* successor sets
941 # we are only interested in *other* successor sets
942 continue
942 continue
943 if tuple(nsuccset) in base:
943 if tuple(nsuccset) in base:
944 # we already know the latest base for this divergence
944 # we already know the latest base for this divergence
945 continue
945 continue
946 base[tuple(nsuccset)] = n
946 base[tuple(nsuccset)] = n
947 return [{'divergentnodes': divset, 'commonpredecessor': b}
947 return [{'divergentnodes': divset, 'commonpredecessor': b}
948 for divset, b in base.iteritems()]
948 for divset, b in base.iteritems()]
949
949
950 def whyunstable(repo, ctx):
950 def whyunstable(repo, ctx):
951 result = []
951 result = []
952 if ctx.orphan():
952 if ctx.orphan():
953 for parent in ctx.parents():
953 for parent in ctx.parents():
954 kind = None
954 kind = None
955 if parent.orphan():
955 if parent.orphan():
956 kind = 'orphan'
956 kind = 'orphan'
957 elif parent.obsolete():
957 elif parent.obsolete():
958 kind = 'obsolete'
958 kind = 'obsolete'
959 if kind is not None:
959 if kind is not None:
960 result.append({'instability': 'orphan',
960 result.append({'instability': 'orphan',
961 'reason': '%s parent' % kind,
961 'reason': '%s parent' % kind,
962 'node': parent.hex()})
962 'node': parent.hex()})
963 if ctx.phasedivergent():
963 if ctx.phasedivergent():
964 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
964 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
965 ignoreflags=bumpedfix)
965 ignoreflags=bumpedfix)
966 immutable = [repo[p] for p in predecessors
966 immutable = [repo[p] for p in predecessors
967 if p in repo and not repo[p].mutable()]
967 if p in repo and not repo[p].mutable()]
968 for predecessor in immutable:
968 for predecessor in immutable:
969 result.append({'instability': 'phase-divergent',
969 result.append({'instability': 'phase-divergent',
970 'reason': 'immutable predecessor',
970 'reason': 'immutable predecessor',
971 'node': predecessor.hex()})
971 'node': predecessor.hex()})
972 if ctx.contentdivergent():
972 if ctx.contentdivergent():
973 dsets = divergentsets(repo, ctx)
973 dsets = divergentsets(repo, ctx)
974 for dset in dsets:
974 for dset in dsets:
975 divnodes = [repo[n] for n in dset['divergentnodes']]
975 divnodes = [repo[n] for n in dset['divergentnodes']]
976 result.append({'instability': 'content-divergent',
976 result.append({'instability': 'content-divergent',
977 'divergentnodes': divnodes,
977 'divergentnodes': divnodes,
978 'reason': 'predecessor',
978 'reason': 'predecessor',
979 'node': nodemod.hex(dset['commonpredecessor'])})
979 'node': nodemod.hex(dset['commonpredecessor'])})
980 return result
980 return result
@@ -1,2866 +1,2866 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 diffhelper,
30 diffhelper,
31 encoding,
31 encoding,
32 error,
32 error,
33 mail,
33 mail,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42 from .utils import (
42 from .utils import (
43 dateutil,
43 dateutil,
44 diffutil,
44 diffutil,
45 procutil,
45 procutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 stringio = util.stringio
49 stringio = util.stringio
50
50
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
53 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
53 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
54 b'[^ \ta-zA-Z0-9_\x80-\xff])')
54 b'[^ \ta-zA-Z0-9_\x80-\xff])')
55
55
56 PatchError = error.PatchError
56 PatchError = error.PatchError
57
57
58 # public functions
58 # public functions
59
59
60 def split(stream):
60 def split(stream):
61 '''return an iterator of individual patches from a stream'''
61 '''return an iterator of individual patches from a stream'''
62 def isheader(line, inheader):
62 def isheader(line, inheader):
63 if inheader and line.startswith((' ', '\t')):
63 if inheader and line.startswith((' ', '\t')):
64 # continuation
64 # continuation
65 return True
65 return True
66 if line.startswith((' ', '-', '+')):
66 if line.startswith((' ', '-', '+')):
67 # diff line - don't check for a header pattern in it
67 # diff line - don't check for a header pattern in it
68 return False
68 return False
69 l = line.split(': ', 1)
69 l = line.split(': ', 1)
70 return len(l) == 2 and ' ' not in l[0]
70 return len(l) == 2 and ' ' not in l[0]
71
71
72 def chunk(lines):
72 def chunk(lines):
73 return stringio(''.join(lines))
73 return stringio(''.join(lines))
74
74
75 def hgsplit(stream, cur):
75 def hgsplit(stream, cur):
76 inheader = True
76 inheader = True
77
77
78 for line in stream:
78 for line in stream:
79 if not line.strip():
79 if not line.strip():
80 inheader = False
80 inheader = False
81 if not inheader and line.startswith('# HG changeset patch'):
81 if not inheader and line.startswith('# HG changeset patch'):
82 yield chunk(cur)
82 yield chunk(cur)
83 cur = []
83 cur = []
84 inheader = True
84 inheader = True
85
85
86 cur.append(line)
86 cur.append(line)
87
87
88 if cur:
88 if cur:
89 yield chunk(cur)
89 yield chunk(cur)
90
90
91 def mboxsplit(stream, cur):
91 def mboxsplit(stream, cur):
92 for line in stream:
92 for line in stream:
93 if line.startswith('From '):
93 if line.startswith('From '):
94 for c in split(chunk(cur[1:])):
94 for c in split(chunk(cur[1:])):
95 yield c
95 yield c
96 cur = []
96 cur = []
97
97
98 cur.append(line)
98 cur.append(line)
99
99
100 if cur:
100 if cur:
101 for c in split(chunk(cur[1:])):
101 for c in split(chunk(cur[1:])):
102 yield c
102 yield c
103
103
104 def mimesplit(stream, cur):
104 def mimesplit(stream, cur):
105 def msgfp(m):
105 def msgfp(m):
106 fp = stringio()
106 fp = stringio()
107 g = email.Generator.Generator(fp, mangle_from_=False)
107 g = email.Generator.Generator(fp, mangle_from_=False)
108 g.flatten(m)
108 g.flatten(m)
109 fp.seek(0)
109 fp.seek(0)
110 return fp
110 return fp
111
111
112 for line in stream:
112 for line in stream:
113 cur.append(line)
113 cur.append(line)
114 c = chunk(cur)
114 c = chunk(cur)
115
115
116 m = mail.parse(c)
116 m = mail.parse(c)
117 if not m.is_multipart():
117 if not m.is_multipart():
118 yield msgfp(m)
118 yield msgfp(m)
119 else:
119 else:
120 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
120 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
121 for part in m.walk():
121 for part in m.walk():
122 ct = part.get_content_type()
122 ct = part.get_content_type()
123 if ct not in ok_types:
123 if ct not in ok_types:
124 continue
124 continue
125 yield msgfp(part)
125 yield msgfp(part)
126
126
127 def headersplit(stream, cur):
127 def headersplit(stream, cur):
128 inheader = False
128 inheader = False
129
129
130 for line in stream:
130 for line in stream:
131 if not inheader and isheader(line, inheader):
131 if not inheader and isheader(line, inheader):
132 yield chunk(cur)
132 yield chunk(cur)
133 cur = []
133 cur = []
134 inheader = True
134 inheader = True
135 if inheader and not isheader(line, inheader):
135 if inheader and not isheader(line, inheader):
136 inheader = False
136 inheader = False
137
137
138 cur.append(line)
138 cur.append(line)
139
139
140 if cur:
140 if cur:
141 yield chunk(cur)
141 yield chunk(cur)
142
142
143 def remainder(cur):
143 def remainder(cur):
144 yield chunk(cur)
144 yield chunk(cur)
145
145
146 class fiter(object):
146 class fiter(object):
147 def __init__(self, fp):
147 def __init__(self, fp):
148 self.fp = fp
148 self.fp = fp
149
149
150 def __iter__(self):
150 def __iter__(self):
151 return self
151 return self
152
152
153 def next(self):
153 def next(self):
154 l = self.fp.readline()
154 l = self.fp.readline()
155 if not l:
155 if not l:
156 raise StopIteration
156 raise StopIteration
157 return l
157 return l
158
158
159 __next__ = next
159 __next__ = next
160
160
161 inheader = False
161 inheader = False
162 cur = []
162 cur = []
163
163
164 mimeheaders = ['content-type']
164 mimeheaders = ['content-type']
165
165
166 if not util.safehasattr(stream, 'next'):
166 if not util.safehasattr(stream, 'next'):
167 # http responses, for example, have readline but not next
167 # http responses, for example, have readline but not next
168 stream = fiter(stream)
168 stream = fiter(stream)
169
169
170 for line in stream:
170 for line in stream:
171 cur.append(line)
171 cur.append(line)
172 if line.startswith('# HG changeset patch'):
172 if line.startswith('# HG changeset patch'):
173 return hgsplit(stream, cur)
173 return hgsplit(stream, cur)
174 elif line.startswith('From '):
174 elif line.startswith('From '):
175 return mboxsplit(stream, cur)
175 return mboxsplit(stream, cur)
176 elif isheader(line, inheader):
176 elif isheader(line, inheader):
177 inheader = True
177 inheader = True
178 if line.split(':', 1)[0].lower() in mimeheaders:
178 if line.split(':', 1)[0].lower() in mimeheaders:
179 # let email parser handle this
179 # let email parser handle this
180 return mimesplit(stream, cur)
180 return mimesplit(stream, cur)
181 elif line.startswith('--- ') and inheader:
181 elif line.startswith('--- ') and inheader:
182 # No evil headers seen before the diff started; split by hand
182 # No evil headers seen before the diff started; split by hand
183 return headersplit(stream, cur)
183 return headersplit(stream, cur)
184 # Not enough info, keep reading
184 # Not enough info, keep reading
185
185
186 # if we are here, we have a very plain patch
186 # if we are here, we have a very plain patch
187 return remainder(cur)
187 return remainder(cur)
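
# Editor's sketch: isheader() drives the dispatch above (hypothetical lines,
# byte-string prefixes omitted):
#
#   isheader('Subject: [PATCH] fix', False)  # True: 'Key: value' shape
#   isheader('\tcontinued value', True)      # True: header continuation
#   isheader('+added line', True)            # False: diff content
#
# so a plain header block seen before a '--- ' line sends the stream to
# headersplit(), while a Content-Type header routes it to mimesplit().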
188
188
189 ## Some facility for extensible patch parsing:
189 ## Some facility for extensible patch parsing:
190 # list of pairs ("header to match", "data key")
190 # list of pairs ("header to match", "data key")
191 patchheadermap = [('Date', 'date'),
191 patchheadermap = [('Date', 'date'),
192 ('Branch', 'branch'),
192 ('Branch', 'branch'),
193 ('Node ID', 'nodeid'),
193 ('Node ID', 'nodeid'),
194 ]
194 ]
195
195
196 @contextlib.contextmanager
196 @contextlib.contextmanager
197 def extract(ui, fileobj):
197 def extract(ui, fileobj):
198 '''extract patch from data read from fileobj.
198 '''extract patch from data read from fileobj.
199
199
200 patch can be a normal patch or contained in an email message.
200 patch can be a normal patch or contained in an email message.
201
201
202 return a dictionary. Standard keys are:
202 return a dictionary. Standard keys are:
203 - filename,
203 - filename,
204 - message,
204 - message,
205 - user,
205 - user,
206 - date,
206 - date,
207 - branch,
207 - branch,
208 - node,
208 - node,
209 - p1,
209 - p1,
210 - p2.
210 - p2.
211 Any item can be missing from the dictionary. If filename is missing,
211 Any item can be missing from the dictionary. If filename is missing,
212 fileobj did not contain a patch. The temporary file is unlinked when the extract() context manager exits.'''
212 fileobj did not contain a patch. The temporary file is unlinked when the extract() context manager exits.'''
213
213
214 fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
214 fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
215 tmpfp = os.fdopen(fd, r'wb')
215 tmpfp = os.fdopen(fd, r'wb')
216 try:
216 try:
217 yield _extract(ui, fileobj, tmpname, tmpfp)
217 yield _extract(ui, fileobj, tmpname, tmpfp)
218 finally:
218 finally:
219 tmpfp.close()
219 tmpfp.close()
220 os.unlink(tmpname)
220 os.unlink(tmpname)
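
# Editor's sketch of the calling convention (hypothetical caller):
#
#   with extract(ui, fileobj) as data:
#       if 'filename' not in data:
#           raise PatchError('no patch found')
#       # data['filename'] names a temporary file holding the raw patch;
#       # it is unlinked automatically when the with-block exits.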
221
221
222 def _extract(ui, fileobj, tmpname, tmpfp):
222 def _extract(ui, fileobj, tmpname, tmpfp):
223
223
224 # attempt to detect the start of a patch
224 # attempt to detect the start of a patch
225 # (this heuristic is borrowed from quilt)
225 # (this heuristic is borrowed from quilt)
226 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
226 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
227 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
227 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
228 br'---[ \t].*?^\+\+\+[ \t]|'
228 br'---[ \t].*?^\+\+\+[ \t]|'
229 br'\*\*\*[ \t].*?^---[ \t])',
229 br'\*\*\*[ \t].*?^---[ \t])',
230 re.MULTILINE | re.DOTALL)
230 re.MULTILINE | re.DOTALL)
231
231
232 data = {}
232 data = {}
233
233
234 msg = mail.parse(fileobj)
234 msg = mail.parse(fileobj)
235
235
236 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
236 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
237 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
237 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
238 if not subject and not data['user']:
238 if not subject and not data['user']:
239 # Not an email, restore parsed headers if any
239 # Not an email, restore parsed headers if any
240 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
240 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
241 for h in msg.items()) + '\n'
241 for h in msg.items()) + '\n'
242
242
243 # should try to parse msg['Date']
243 # should try to parse msg['Date']
244 parents = []
244 parents = []
245
245
246 if subject:
246 if subject:
247 if subject.startswith('[PATCH'):
247 if subject.startswith('[PATCH'):
248 pend = subject.find(']')
248 pend = subject.find(']')
249 if pend >= 0:
249 if pend >= 0:
250 subject = subject[pend + 1:].lstrip()
250 subject = subject[pend + 1:].lstrip()
251 subject = re.sub(br'\n[ \t]+', ' ', subject)
251 subject = re.sub(br'\n[ \t]+', ' ', subject)
252 ui.debug('Subject: %s\n' % subject)
252 ui.debug('Subject: %s\n' % subject)
253 if data['user']:
253 if data['user']:
254 ui.debug('From: %s\n' % data['user'])
254 ui.debug('From: %s\n' % data['user'])
255 diffs_seen = 0
255 diffs_seen = 0
256 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
256 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
257 message = ''
257 message = ''
258 for part in msg.walk():
258 for part in msg.walk():
259 content_type = pycompat.bytestr(part.get_content_type())
259 content_type = pycompat.bytestr(part.get_content_type())
260 ui.debug('Content-Type: %s\n' % content_type)
260 ui.debug('Content-Type: %s\n' % content_type)
261 if content_type not in ok_types:
261 if content_type not in ok_types:
262 continue
262 continue
263 payload = part.get_payload(decode=True)
263 payload = part.get_payload(decode=True)
264 m = diffre.search(payload)
264 m = diffre.search(payload)
265 if m:
265 if m:
266 hgpatch = False
266 hgpatch = False
267 hgpatchheader = False
267 hgpatchheader = False
268 ignoretext = False
268 ignoretext = False
269
269
270 ui.debug('found patch at byte %d\n' % m.start(0))
270 ui.debug('found patch at byte %d\n' % m.start(0))
271 diffs_seen += 1
271 diffs_seen += 1
272 cfp = stringio()
272 cfp = stringio()
273 for line in payload[:m.start(0)].splitlines():
273 for line in payload[:m.start(0)].splitlines():
274 if line.startswith('# HG changeset patch') and not hgpatch:
274 if line.startswith('# HG changeset patch') and not hgpatch:
275 ui.debug('patch generated by hg export\n')
275 ui.debug('patch generated by hg export\n')
276 hgpatch = True
276 hgpatch = True
277 hgpatchheader = True
277 hgpatchheader = True
278 # drop earlier commit message content
278 # drop earlier commit message content
279 cfp.seek(0)
279 cfp.seek(0)
280 cfp.truncate()
280 cfp.truncate()
281 subject = None
281 subject = None
282 elif hgpatchheader:
282 elif hgpatchheader:
283 if line.startswith('# User '):
283 if line.startswith('# User '):
284 data['user'] = line[7:]
284 data['user'] = line[7:]
285 ui.debug('From: %s\n' % data['user'])
285 ui.debug('From: %s\n' % data['user'])
286 elif line.startswith("# Parent "):
286 elif line.startswith("# Parent "):
287 parents.append(line[9:].lstrip())
287 parents.append(line[9:].lstrip())
288 elif line.startswith("# "):
288 elif line.startswith("# "):
289 for header, key in patchheadermap:
289 for header, key in patchheadermap:
290 prefix = '# %s ' % header
290 prefix = '# %s ' % header
291 if line.startswith(prefix):
291 if line.startswith(prefix):
292 data[key] = line[len(prefix):]
292 data[key] = line[len(prefix):]
293 else:
293 else:
294 hgpatchheader = False
294 hgpatchheader = False
295 elif line == '---':
295 elif line == '---':
296 ignoretext = True
296 ignoretext = True
297 if not hgpatchheader and not ignoretext:
297 if not hgpatchheader and not ignoretext:
298 cfp.write(line)
298 cfp.write(line)
299 cfp.write('\n')
299 cfp.write('\n')
300 message = cfp.getvalue()
300 message = cfp.getvalue()
301 if tmpfp:
301 if tmpfp:
302 tmpfp.write(payload)
302 tmpfp.write(payload)
303 if not payload.endswith('\n'):
303 if not payload.endswith('\n'):
304 tmpfp.write('\n')
304 tmpfp.write('\n')
305 elif not diffs_seen and message and content_type == 'text/plain':
305 elif not diffs_seen and message and content_type == 'text/plain':
306 message += '\n' + payload
306 message += '\n' + payload
307
307
308 if subject and not message.startswith(subject):
308 if subject and not message.startswith(subject):
309 message = '%s\n%s' % (subject, message)
309 message = '%s\n%s' % (subject, message)
310 data['message'] = message
310 data['message'] = message
311 tmpfp.close()
311 tmpfp.close()
312 if parents:
312 if parents:
313 data['p1'] = parents.pop(0)
313 data['p1'] = parents.pop(0)
314 if parents:
314 if parents:
315 data['p2'] = parents.pop(0)
315 data['p2'] = parents.pop(0)
316
316
317 if diffs_seen:
317 if diffs_seen:
318 data['filename'] = tmpname
318 data['filename'] = tmpname
319
319
320 return data
320 return data
321
321
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

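# Illustrative sketch (not part of the module): how a git-style rename ends
# up in a patchmeta, assuming the header "diff --git a/old.txt b/new.txt"
# followed by "rename from old.txt" / "rename to new.txt":
#
#   gp = patchmeta('new.txt')   # created from the b/ side of the header
#   gp.op = 'RENAME'
#   gp.oldpath = 'old.txt'
#   gp.ispatching('a/old.txt', 'b/new.txt')   # -> True
#
# Note that setmode() keeps the raw masked bits, e.g. setmode(0o100755)
# stores mode == (0, 0o100), so the tuple holds truthy ints, not bools.
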
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

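# Illustrative sketch (not part of the module): readgitpatch() only needs an
# iterable of lines, so a hand-written header (invented here) works:
#
#   lines = iter(['diff --git a/old.txt b/new.txt\n',
#                 'rename from old.txt\n',
#                 'rename to new.txt\n'])
#   gps = readgitpatch(lines)
#   # -> [<patchmeta RENAME 'new.txt'>] with oldpath == 'old.txt'
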
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, '')

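# Illustrative sketch (not part of the module): the push-back behavior that
# read_context_hunk() relies on further below, assuming fp is any object
# with a readline() method:
#
#   lr = linereader(fp)
#   l = lr.readline()
#   if l.startswith('---'):
#       lr.push(l)       # un-read the line...
#   l = lr.readline()    # ...so the next readline() returns it again
#
# Iteration stops at the first empty string, matching the file-object
# convention of returning '' at EOF.
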
class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

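# Illustrative sketch (not part of the module): the (islink, isexec) flag
# convention shared by getfile() and setfile(). The ui object and paths
# here are assumptions for illustration:
#
#   backend = fsbackend(ui, '/path/to/dir')
#   data, mode = backend.getfile('run.sh')     # e.g. mode == (False, True)
#   backend.setfile('run.sh', data, mode, None)
#   backend.setfile('run.sh', None, (False, True), None)  # flags-only change
#
# The data=None form only updates flags, which is how pure mode changes are
# applied without rewriting content.
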
class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = pycompat.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = '%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

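# Illustrative sketch (not part of the module): filestore keeps small files
# in memory and spills larger ones to a temporary directory once maxsize
# would be exceeded (sizes invented for illustration):
#
#   store = filestore(maxsize=10)
#   store.setfile('a', 'tiny', (False, False))      # kept in self.data
#   store.setfile('b', 'x' * 1000, (False, False))  # written to a tempdir
#   store.getfile('b')   # transparently reads the spilled copy back
#   store.close()        # removes the tempdir, if one was created
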
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

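# Illustrative sketch (not part of the module): for '@@ -1,5 +1,6 @@',
# unidesc captures the groups ('1', '5', '1', '6'); for the short form
# '@@ -1 +1 @@' both length groups are None, which read_unified_hunk()
# below defaults to 1.
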
class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l.endswith('\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew into account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

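# Illustrative sketch (not part of the module): the return-value convention
# of patchfile.apply() above, with pf and h standing in for a patchfile and
# a parsed hunk:
#
#   ret = pf.apply(h)
#   if ret > 0:
#       pass    # applied, but only with fuzz level 'ret'
#   elif ret < 0:
#       pass    # rejected; close() will emit it into a .rej file
#   else:
#       pass    # applied cleanly
#
# close() afterwards writes the patched lines back (when dirty) and returns
# the number of rejected hunks.
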
class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level; for example, a file that has been deleted is a special
        # file. The user cannot change the content of the operation: in the
        # case of a deleted file he has to take the deletion or not take it,
        # he cannot take only some of it.
        # Newly added files are special if they are empty; they are not
        # special if they have some content, as we want to be able to change
        # it.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
            any(self.special_re.match(h) for h in self.header)

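# Illustrative sketch (not part of the module): how header.files() reads the
# first header line (diff lines invented for illustration):
#
#   header(['diff --git a/f.txt b/f.txt\n']).files()  # -> ['f.txt']
#   header(['diff --git a/f.txt b/g.txt\n']).files()  # -> ['f.txt', 'g.txt']
#
# filename() returns the last entry, i.e. the post-patch name.
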
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith('+')])
        rem = len([h for h in hunk if h.startswith('-')])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keeping other things
        unchanged.
        """
        m = {'+': '-', '-': '+', '\\': '\\'}
        hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, hunk, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

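# Illustrative sketch (not part of the module): countchanges() and
# reversehunk() on a hand-written hunk body (invented for illustration):
#
#   body = ['-old line\n', '+new line\n', '+added line\n']
#   # countchanges(body) -> (2, 1): two lines added, one removed
#
# reversehunk() swaps the '+' and '-' prefixes and exchanges fromline and
# toline, so reversing a diff(A, B) hunk yields the matching diff(B, A)
# hunk, which is the operation backout-style interactive flows need.
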
def getmessages():
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }

def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff")
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
            applied[h.filename()] = [h]
            if h.allhunks():
                applied[h.filename()] += h.hunks
                continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})

class hunk(object):
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
                                self.a, self.b)
        except error.ParseError as e:
            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Let's try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but
                # presumably some diff(1) implementations out there behave
                # differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d new text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith('\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of the 'old'
        # and 'new' line lists. It checks the hunk to make sure only
        # context lines are removed, and then returns the shortened lists.
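        # e.g. with fuzz=2 and toponly=False, up to two leading and two
        # trailing context lines may be dropped to help the hunk apply.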
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart

class binhunk(object):
    'A binary patch file.'
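    # Parses a "GIT binary patch" section: either a full 'literal'
    # payload or a 'delta' against the current file contents, in both
    # cases zlib-compressed and base85-encoded.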
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
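        # each base85 line is prefixed with a length character: 'A'-'Z'
        # encode 1-26 decoded bytes, 'a'-'z' encode 27-52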
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text

def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]

def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    newhunks = []
    for c in hunks:
        if util.safehasattr(c, 'reversehunk'):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks

def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                    self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

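        # state machine: maps (current state, event from scanpatch) to
        # the handler invoked for that event; a missing entry is an
        # illegal transition and raises PatchError below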
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()

def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i:i + 1] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()

def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
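    # Build a patchmeta for a plain (non-git) patch, inferring file
    # creation/removal from /dev/null markers and zero-length ranges.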
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp

def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file',    [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk',    [hunk_lines])
    - ('range',   (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line.startswith(' '):
            cs = (' ', '\\')
            yield 'context', scanwhile(line, lambda l: l.startswith(cs))
        elif line.startswith(('-', '+')):
            cs = ('-', '+', '\\')
            yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line

def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is: the renamed 'a' could not be
    found, since it would already have been renamed. And we cannot copy
    from 'b' instead because 'b' would already have been changed. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
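        # the input is not seekable (e.g. a pipe), so buffer it in
        # memory to allow rewinding after the metadata scan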
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
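    # 'context' is tri-state: None until the diff style is known, then
    # True for context diffs and False for unified diffs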
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
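        # skip one variable-length size header: the high bit of each
        # byte marks continuation, so consume bytes until it is clear
        # and return how many bytes were consumed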
        i = 0
        for c in pycompat.bytestr(binchunk):
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
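            # copy instruction: bits 0-3 select which offset bytes
            # follow, bits 4-6 select which size bytes follow, and a
            # size of 0 means 0x10000 (as in git's patch-delta.c)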
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _canonprefix(repo, prefix):
    if prefix:
        prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
        if prefix != '':
            prefix += '/'
    return prefix

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    cmd = ('%s %s -p%d < %s'
           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
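    # e.g. with an external patcher this runs something like:
    #   patch -d /repo/root -p1 < /tmp/foo.patch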
    ui.debug('Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, 'rb')
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         procutil.explainexit(code))
    return fuzz

def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
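        # patchobj was already an open file-like object rather than a
        # file name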
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed

class GitDiffRequired(Exception):
    pass

diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts

2240 def diff(repo, node1=None, node2=None, match=None, changes=None,
2240 def diff(repo, node1=None, node2=None, match=None, changes=None,
2241 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2241 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2242 hunksfilterfn=None):
2242 hunksfilterfn=None):
2243 '''yields diff of changes to files between two nodes, or node and
2243 '''yields diff of changes to files between two nodes, or node and
2244 working directory.
2244 working directory.
2245
2245
2246 if node1 is None, use first dirstate parent instead.
2246 if node1 is None, use first dirstate parent instead.
2247 if node2 is None, compare node1 with working directory.
2247 if node2 is None, compare node1 with working directory.
2248
2248
2249 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2249 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2250 every time some change cannot be represented with the current
2250 every time some change cannot be represented with the current
2251 patch format. Return False to upgrade to git patch format, True to
2251 patch format. Return False to upgrade to git patch format, True to
2252 accept the loss or raise an exception to abort the diff. It is
2252 accept the loss or raise an exception to abort the diff. It is
2253 called with the name of current file being diffed as 'fn'. If set
2253 called with the name of current file being diffed as 'fn'. If set
2254 to None, patches will always be upgraded to git format when
2254 to None, patches will always be upgraded to git format when
2255 necessary.
2255 necessary.
2256
2256
2257 prefix is a filename prefix that is prepended to all filenames on
2257 prefix is a filename prefix that is prepended to all filenames on
2258 display (used for subrepos).
2258 display (used for subrepos).
2259
2259
2260 relroot, if not empty, must be normalized with a trailing /. Any match
2260 relroot, if not empty, must be normalized with a trailing /. Any match
2261 patterns that fall outside it will be ignored.
2261 patterns that fall outside it will be ignored.
2262
2262
2263 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2263 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2264 information.
2264 information.
2265
2265
2266 hunksfilterfn, if not None, should be a function taking a filectx and
2266 hunksfilterfn, if not None, should be a function taking a filectx and
2267 hunks generator that may yield filtered hunks.
2267 hunks generator that may yield filtered hunks.
2268 '''
2268 '''
2269 for fctx1, fctx2, hdr, hunks in diffhunks(
2269 for fctx1, fctx2, hdr, hunks in diffhunks(
2270 repo, node1=node1, node2=node2,
2270 repo, node1=node1, node2=node2,
2271 match=match, changes=changes, opts=opts,
2271 match=match, changes=changes, opts=opts,
2272 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2272 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2273 ):
2273 ):
2274 if hunksfilterfn is not None:
2274 if hunksfilterfn is not None:
2275 # If the file has been removed, fctx2 is None; but this should
2275 # If the file has been removed, fctx2 is None; but this should
2276 # not occur here since we catch removed files early in
2276 # not occur here since we catch removed files early in
2277 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2277 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2278 assert fctx2 is not None, \
2278 assert fctx2 is not None, \
2279 'fctx2 unexpectedly None in diff hunks filtering'
2279 'fctx2 unexpectedly None in diff hunks filtering'
2280 hunks = hunksfilterfn(fctx2, hunks)
2280 hunks = hunksfilterfn(fctx2, hunks)
2281 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2281 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2282 if hdr and (text or len(hdr) > 1):
2282 if hdr and (text or len(hdr) > 1):
2283 yield '\n'.join(hdr) + '\n'
2283 yield '\n'.join(hdr) + '\n'
2284 if text:
2284 if text:
2285 yield text
2285 yield text
2286
2286
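# A minimal usage sketch (not from the original source; assumes existing
# 'repo' and 'ui' objects): diff() is a generator of byte-string chunks,
# so a caller typically streams it straight to the ui:
#
#     for chunk in diff(repo):  # working directory against its parent
#         ui.write(chunk)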
2287 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2287 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2288 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2288 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2289 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2289 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2290 where `header` is a list of diff headers and `hunks` is an iterable of
2290 where `header` is a list of diff headers and `hunks` is an iterable of
2291 (`hunkrange`, `hunklines`) tuples.
2291 (`hunkrange`, `hunklines`) tuples.
2292
2292
2293 See diff() for the meaning of parameters.
2293 See diff() for the meaning of parameters.
2294 """
2294 """
2295
2295
2296 if opts is None:
2296 if opts is None:
2297 opts = mdiff.defaultopts
2297 opts = mdiff.defaultopts
2298
2298
2299 if not node1 and not node2:
2299 if not node1 and not node2:
2300 node1 = repo.dirstate.p1()
2300 node1 = repo.dirstate.p1()
2301
2301
2302 def lrugetfilectx():
2302 def lrugetfilectx():
2303 cache = {}
2303 cache = {}
2304 order = collections.deque()
2304 order = collections.deque()
2305 def getfilectx(f, ctx):
2305 def getfilectx(f, ctx):
2306 fctx = ctx.filectx(f, filelog=cache.get(f))
2306 fctx = ctx.filectx(f, filelog=cache.get(f))
2307 if f not in cache:
2307 if f not in cache:
2308 if len(cache) > 20:
2308 if len(cache) > 20:
2309 del cache[order.popleft()]
2309 del cache[order.popleft()]
2310 cache[f] = fctx.filelog()
2310 cache[f] = fctx.filelog()
2311 else:
2311 else:
2312 order.remove(f)
2312 order.remove(f)
2313 order.append(f)
2313 order.append(f)
2314 return fctx
2314 return fctx
2315 return getfilectx
2315 return getfilectx
2316 getfilectx = lrugetfilectx()
2316 getfilectx = lrugetfilectx()
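# Note (added for clarity): lrugetfilectx() caches roughly the 20 most
# recently used filelogs, keyed by filename, so repeated lookups against
# the same file reuse the already-parsed filelog instead of reopening it.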
2317
2317
2318 ctx1 = repo[node1]
2318 ctx1 = repo[node1]
2319 ctx2 = repo[node2]
2319 ctx2 = repo[node2]
2320
2320
2321 relfiltered = False
2321 relfiltered = False
2322 if relroot != '' and match.always():
2322 if relroot != '' and match.always():
2323 # as a special case, create a new matcher with just the relroot
2323 # as a special case, create a new matcher with just the relroot
2324 pats = [relroot]
2324 pats = [relroot]
2325 match = scmutil.match(ctx2, pats, default='path')
2325 match = scmutil.match(ctx2, pats, default='path')
2326 relfiltered = True
2326 relfiltered = True
2327
2327
2328 if not changes:
2328 if not changes:
2329 changes = repo.status(ctx1, ctx2, match=match)
2329 changes = repo.status(ctx1, ctx2, match=match)
2330 modified, added, removed = changes[:3]
2330 modified, added, removed = changes[:3]
2331
2331
2332 if not modified and not added and not removed:
2332 if not modified and not added and not removed:
2333 return []
2333 return []
2334
2334
2335 if repo.ui.debugflag:
2335 if repo.ui.debugflag:
2336 hexfunc = hex
2336 hexfunc = hex
2337 else:
2337 else:
2338 hexfunc = short
2338 hexfunc = short
2339 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2339 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2340
2340
2341 if copy is None:
2341 if copy is None:
2342 copy = {}
2342 copy = {}
2343 if opts.git or opts.upgrade:
2343 if opts.git or opts.upgrade:
2344 copy = copies.pathcopies(ctx1, ctx2, match=match)
2344 copy = copies.pathcopies(ctx1, ctx2, match=match)
2345
2345
2346 if relroot is not None:
2346 if relroot is not None:
2347 if not relfiltered:
2347 if not relfiltered:
2348 # XXX this would ideally be done in the matcher, but that is
2348 # XXX this would ideally be done in the matcher, but that is
2349 # generally meant to 'or' patterns, not 'and' them. In this case we
2349 # generally meant to 'or' patterns, not 'and' them. In this case we
2350 # need to 'and' all the patterns from the matcher with relroot.
2350 # need to 'and' all the patterns from the matcher with relroot.
2351 def filterrel(l):
2351 def filterrel(l):
2352 return [f for f in l if f.startswith(relroot)]
2352 return [f for f in l if f.startswith(relroot)]
2353 modified = filterrel(modified)
2353 modified = filterrel(modified)
2354 added = filterrel(added)
2354 added = filterrel(added)
2355 removed = filterrel(removed)
2355 removed = filterrel(removed)
2356 relfiltered = True
2356 relfiltered = True
2357 # filter out copies where either side isn't inside the relative root
2357 # filter out copies where either side isn't inside the relative root
2358 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2358 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2359 if dst.startswith(relroot)
2359 if dst.startswith(relroot)
2360 and src.startswith(relroot)))
2360 and src.startswith(relroot)))
2361
2361
2362 modifiedset = set(modified)
2362 modifiedset = set(modified)
2363 addedset = set(added)
2363 addedset = set(added)
2364 removedset = set(removed)
2364 removedset = set(removed)
2365 for f in modified:
2365 for f in modified:
2366 if f not in ctx1:
2366 if f not in ctx1:
2367 # Fix up added, since merged-in additions appear as
2367 # Fix up added, since merged-in additions appear as
2368 # modifications during merges
2368 # modifications during merges
2369 modifiedset.remove(f)
2369 modifiedset.remove(f)
2370 addedset.add(f)
2370 addedset.add(f)
2371 for f in removed:
2371 for f in removed:
2372 if f not in ctx1:
2372 if f not in ctx1:
2373 # Merged-in additions that are then removed are reported as removed.
2373 # Merged-in additions that are then removed are reported as removed.
2374 # They are not in ctx1, so we don't want to show them in the diff.
2374 # They are not in ctx1, so we don't want to show them in the diff.
2375 removedset.remove(f)
2375 removedset.remove(f)
2376 modified = sorted(modifiedset)
2376 modified = sorted(modifiedset)
2377 added = sorted(addedset)
2377 added = sorted(addedset)
2378 removed = sorted(removedset)
2378 removed = sorted(removedset)
2379 for dst, src in list(copy.items()):
2379 for dst, src in list(copy.items()):
2380 if src not in ctx1:
2380 if src not in ctx1:
2381 # Files merged in during a merge and then copied/renamed are
2381 # Files merged in during a merge and then copied/renamed are
2382 # reported as copies. We want to show them in the diff as additions.
2382 # reported as copies. We want to show them in the diff as additions.
2383 del copy[dst]
2383 del copy[dst]
2384
2384
2385 prefetchmatch = scmutil.matchfiles(
2385 prefetchmatch = scmutil.matchfiles(
2386 repo, list(modifiedset | addedset | removedset))
2386 repo, list(modifiedset | addedset | removedset))
2387 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2387 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2388
2388
2389 def difffn(opts, losedata):
2389 def difffn(opts, losedata):
2390 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2390 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2391 copy, getfilectx, opts, losedata, prefix, relroot)
2391 copy, getfilectx, opts, losedata, prefix, relroot)
2392 if opts.upgrade and not opts.git:
2392 if opts.upgrade and not opts.git:
2393 try:
2393 try:
2394 def losedata(fn):
2394 def losedata(fn):
2395 if not losedatafn or not losedatafn(fn=fn):
2395 if not losedatafn or not losedatafn(fn=fn):
2396 raise GitDiffRequired
2396 raise GitDiffRequired
2397 # Buffer the whole output until we are sure it can be generated
2397 # Buffer the whole output until we are sure it can be generated
2398 return list(difffn(opts.copy(git=False), losedata))
2398 return list(difffn(opts.copy(git=False), losedata))
2399 except GitDiffRequired:
2399 except GitDiffRequired:
2400 return difffn(opts.copy(git=True), None)
2400 return difffn(opts.copy(git=True), None)
2401 else:
2401 else:
2402 return difffn(opts, None)
2402 return difffn(opts, None)
2403
2403
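# A minimal consumption sketch (not from the original source), following
# the docstring above: each item is a (fctx1, fctx2, header, hunks) tuple,
# and 'hunks' iterates (hunkrange, hunklines) pairs:
#
#     for fctx1, fctx2, header, hunks in diffhunks(repo):
#         for hunkrange, hunklines in hunks:
#             pass  # e.g. filter or relabel individual hunk lines here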
2404 def diffsinglehunk(hunklines):
2404 def diffsinglehunk(hunklines):
2405 """yield tokens for a list of lines in a single hunk"""
2405 """yield tokens for a list of lines in a single hunk"""
2406 for line in hunklines:
2406 for line in hunklines:
2407 # chomp
2407 # chomp
2408 chompline = line.rstrip('\n')
2408 chompline = line.rstrip('\n')
2409 # highlight tabs and trailing whitespace
2409 # highlight tabs and trailing whitespace
2410 stripline = chompline.rstrip()
2410 stripline = chompline.rstrip()
2411 if line.startswith('-'):
2411 if line.startswith('-'):
2412 label = 'diff.deleted'
2412 label = 'diff.deleted'
2413 elif line.startswith('+'):
2413 elif line.startswith('+'):
2414 label = 'diff.inserted'
2414 label = 'diff.inserted'
2415 else:
2415 else:
2416 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2416 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2417 for token in tabsplitter.findall(stripline):
2417 for token in tabsplitter.findall(stripline):
2418 if token.startswith('\t'):
2418 if token.startswith('\t'):
2419 yield (token, 'diff.tab')
2419 yield (token, 'diff.tab')
2420 else:
2420 else:
2421 yield (token, label)
2421 yield (token, label)
2422
2422
2423 if chompline != stripline:
2423 if chompline != stripline:
2424 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2424 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2425 if chompline != line:
2425 if chompline != line:
2426 yield (line[len(chompline):], '')
2426 yield (line[len(chompline):], '')
2427
2427
2428 def diffsinglehunkinline(hunklines):
2428 def diffsinglehunkinline(hunklines):
2429 """yield tokens for a list of lines in a single hunk, with inline colors"""
2429 """yield tokens for a list of lines in a single hunk, with inline colors"""
2430 # prepare deleted, and inserted content
2430 # prepare deleted, and inserted content
2431 a = ''
2431 a = ''
2432 b = ''
2432 b = ''
2433 for line in hunklines:
2433 for line in hunklines:
2434 if line[0] == '-':
2434 if line[0] == '-':
2435 a += line[1:]
2435 a += line[1:]
2436 elif line[0] == '+':
2436 elif line[0] == '+':
2437 b += line[1:]
2437 b += line[1:]
2438 else:
2438 else:
2439 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2439 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2440 # fast path: if either side is empty, use diffsinglehunk
2440 # fast path: if either side is empty, use diffsinglehunk
2441 if not a or not b:
2441 if not a or not b:
2442 for t in diffsinglehunk(hunklines):
2442 for t in diffsinglehunk(hunklines):
2443 yield t
2443 yield t
2444 return
2444 return
2445 # re-split the content into words
2445 # re-split the content into words
2446 al = wordsplitter.findall(a)
2446 al = wordsplitter.findall(a)
2447 bl = wordsplitter.findall(b)
2447 bl = wordsplitter.findall(b)
2448 # re-arrange the words to lines since the diff algorithm is line-based
2448 # re-arrange the words to lines since the diff algorithm is line-based
2449 aln = [s if s == '\n' else s + '\n' for s in al]
2449 aln = [s if s == '\n' else s + '\n' for s in al]
2450 bln = [s if s == '\n' else s + '\n' for s in bl]
2450 bln = [s if s == '\n' else s + '\n' for s in bl]
2451 an = ''.join(aln)
2451 an = ''.join(aln)
2452 bn = ''.join(bln)
2452 bn = ''.join(bln)
2453 # run the diff algorithm, prepare atokens and btokens
2453 # run the diff algorithm, prepare atokens and btokens
2454 atokens = []
2454 atokens = []
2455 btokens = []
2455 btokens = []
2456 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2456 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2457 for (a1, a2, b1, b2), btype in blocks:
2457 for (a1, a2, b1, b2), btype in blocks:
2458 changed = btype == '!'
2458 changed = btype == '!'
2459 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2459 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2460 atokens.append((changed, token))
2460 atokens.append((changed, token))
2461 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2461 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2462 btokens.append((changed, token))
2462 btokens.append((changed, token))
2463
2463
2464 # yield deleted tokens, then inserted ones
2464 # yield deleted tokens, then inserted ones
2465 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2465 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2466 ('+', 'diff.inserted', btokens)]:
2466 ('+', 'diff.inserted', btokens)]:
2467 nextisnewline = True
2467 nextisnewline = True
2468 for changed, token in tokens:
2468 for changed, token in tokens:
2469 if nextisnewline:
2469 if nextisnewline:
2470 yield (prefix, label)
2470 yield (prefix, label)
2471 nextisnewline = False
2471 nextisnewline = False
2472 # special handling for line end
2472 # special handling for line end
2473 isendofline = token.endswith('\n')
2473 isendofline = token.endswith('\n')
2474 if isendofline:
2474 if isendofline:
2475 chomp = token[:-1] # chomp
2475 chomp = token[:-1] # chomp
2476 token = chomp.rstrip() # detect spaces at the end
2476 token = chomp.rstrip() # detect spaces at the end
2477 endspaces = chomp[len(token):]
2477 endspaces = chomp[len(token):]
2478 # scan tabs
2478 # scan tabs
2479 for maybetab in tabsplitter.findall(token):
2479 for maybetab in tabsplitter.findall(token):
2480 if '\t' == maybetab[0]:
2480 if '\t' == maybetab[0]:
2481 currentlabel = 'diff.tab'
2481 currentlabel = 'diff.tab'
2482 else:
2482 else:
2483 if changed:
2483 if changed:
2484 currentlabel = label + '.changed'
2484 currentlabel = label + '.changed'
2485 else:
2485 else:
2486 currentlabel = label + '.unchanged'
2486 currentlabel = label + '.unchanged'
2487 yield (maybetab, currentlabel)
2487 yield (maybetab, currentlabel)
2488 if isendofline:
2488 if isendofline:
2489 if endspaces:
2489 if endspaces:
2490 yield (endspaces, 'diff.trailingwhitespace')
2490 yield (endspaces, 'diff.trailingwhitespace')
2491 yield ('\n', '')
2491 yield ('\n', '')
2492 nextisnewline = True
2492 nextisnewline = True
2493
2493
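# A minimal rendering sketch (not from the original source; assumes a 'ui'
# object): the word-level tokens produced above carry labels such as
# 'diff.inserted.changed' and can be written out directly:
#
#     for token, label in diffsinglehunkinline(['-ab cd\n', '+ab ce\n']):
#         ui.write(token, label=label)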
2494 def difflabel(func, *args, **kw):
2494 def difflabel(func, *args, **kw):
2495 '''yields 2-tuples of (output, label) based on the output of func()'''
2495 '''yields 2-tuples of (output, label) based on the output of func()'''
2496 if kw.get(r'opts') and kw[r'opts'].worddiff:
2496 if kw.get(r'opts') and kw[r'opts'].worddiff:
2497 dodiffhunk = diffsinglehunkinline
2497 dodiffhunk = diffsinglehunkinline
2498 else:
2498 else:
2499 dodiffhunk = diffsinglehunk
2499 dodiffhunk = diffsinglehunk
2500 headprefixes = [('diff', 'diff.diffline'),
2500 headprefixes = [('diff', 'diff.diffline'),
2501 ('copy', 'diff.extended'),
2501 ('copy', 'diff.extended'),
2502 ('rename', 'diff.extended'),
2502 ('rename', 'diff.extended'),
2503 ('old', 'diff.extended'),
2503 ('old', 'diff.extended'),
2504 ('new', 'diff.extended'),
2504 ('new', 'diff.extended'),
2505 ('deleted', 'diff.extended'),
2505 ('deleted', 'diff.extended'),
2506 ('index', 'diff.extended'),
2506 ('index', 'diff.extended'),
2507 ('similarity', 'diff.extended'),
2507 ('similarity', 'diff.extended'),
2508 ('---', 'diff.file_a'),
2508 ('---', 'diff.file_a'),
2509 ('+++', 'diff.file_b')]
2509 ('+++', 'diff.file_b')]
2510 textprefixes = [('@', 'diff.hunk'),
2510 textprefixes = [('@', 'diff.hunk'),
2511 # - and + are handled by diffsinglehunk
2511 # - and + are handled by diffsinglehunk
2512 ]
2512 ]
2513 head = False
2513 head = False
2514
2514
2515 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2515 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2516 hunkbuffer = []
2516 hunkbuffer = []
2517 def consumehunkbuffer():
2517 def consumehunkbuffer():
2518 if hunkbuffer:
2518 if hunkbuffer:
2519 for token in dodiffhunk(hunkbuffer):
2519 for token in dodiffhunk(hunkbuffer):
2520 yield token
2520 yield token
2521 hunkbuffer[:] = []
2521 hunkbuffer[:] = []
2522
2522
2523 for chunk in func(*args, **kw):
2523 for chunk in func(*args, **kw):
2524 lines = chunk.split('\n')
2524 lines = chunk.split('\n')
2525 linecount = len(lines)
2525 linecount = len(lines)
2526 for i, line in enumerate(lines):
2526 for i, line in enumerate(lines):
2527 if head:
2527 if head:
2528 if line.startswith('@'):
2528 if line.startswith('@'):
2529 head = False
2529 head = False
2530 else:
2530 else:
2531 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2531 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2532 head = True
2532 head = True
2533 diffline = False
2533 diffline = False
2534 if not head and line and line.startswith(('+', '-')):
2534 if not head and line and line.startswith(('+', '-')):
2535 diffline = True
2535 diffline = True
2536
2536
2537 prefixes = textprefixes
2537 prefixes = textprefixes
2538 if head:
2538 if head:
2539 prefixes = headprefixes
2539 prefixes = headprefixes
2540 if diffline:
2540 if diffline:
2541 # buffered
2541 # buffered
2542 bufferedline = line
2542 bufferedline = line
2543 if i + 1 < linecount:
2543 if i + 1 < linecount:
2544 bufferedline += "\n"
2544 bufferedline += "\n"
2545 hunkbuffer.append(bufferedline)
2545 hunkbuffer.append(bufferedline)
2546 else:
2546 else:
2547 # unbuffered
2547 # unbuffered
2548 for token in consumehunkbuffer():
2548 for token in consumehunkbuffer():
2549 yield token
2549 yield token
2550 stripline = line.rstrip()
2550 stripline = line.rstrip()
2551 for prefix, label in prefixes:
2551 for prefix, label in prefixes:
2552 if stripline.startswith(prefix):
2552 if stripline.startswith(prefix):
2553 yield (stripline, label)
2553 yield (stripline, label)
2554 if line != stripline:
2554 if line != stripline:
2555 yield (line[len(stripline):],
2555 yield (line[len(stripline):],
2556 'diff.trailingwhitespace')
2556 'diff.trailingwhitespace')
2557 break
2557 break
2558 else:
2558 else:
2559 yield (line, '')
2559 yield (line, '')
2560 if i + 1 < linecount:
2560 if i + 1 < linecount:
2561 yield ('\n', '')
2561 yield ('\n', '')
2562 for token in consumehunkbuffer():
2562 for token in consumehunkbuffer():
2563 yield token
2563 yield token
2564
2564
2565 def diffui(*args, **kw):
2565 def diffui(*args, **kw):
2566 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2566 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2567 return difflabel(diff, *args, **kw)
2567 return difflabel(diff, *args, **kw)
2568
2568
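# A minimal usage sketch (not from the original source; assumes 'repo' and
# 'ui' objects): diffui() is the labelled variant that colorized output
# paths consume:
#
#     for output, label in diffui(repo):
#         ui.write(output, label=label)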
2569 def _filepairs(modified, added, removed, copy, opts):
2569 def _filepairs(modified, added, removed, copy, opts):
2570 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2570 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2571 before and f2 is the name after. For added files, f1 will be None,
2571 before and f2 is the name after. For added files, f1 will be None,
2572 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2572 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2573 or 'rename' (the latter two only if opts.git is set).'''
2573 or 'rename' (the latter two only if opts.git is set).'''
2574 gone = set()
2574 gone = set()
2575
2575
2576 copyto = dict([(v, k) for k, v in copy.items()])
2576 copyto = dict([(v, k) for k, v in copy.items()])
2577
2577
2578 addedset, removedset = set(added), set(removed)
2578 addedset, removedset = set(added), set(removed)
2579
2579
2580 for f in sorted(modified + added + removed):
2580 for f in sorted(modified + added + removed):
2581 copyop = None
2581 copyop = None
2582 f1, f2 = f, f
2582 f1, f2 = f, f
2583 if f in addedset:
2583 if f in addedset:
2584 f1 = None
2584 f1 = None
2585 if f in copy:
2585 if f in copy:
2586 if opts.git:
2586 if opts.git:
2587 f1 = copy[f]
2587 f1 = copy[f]
2588 if f1 in removedset and f1 not in gone:
2588 if f1 in removedset and f1 not in gone:
2589 copyop = 'rename'
2589 copyop = 'rename'
2590 gone.add(f1)
2590 gone.add(f1)
2591 else:
2591 else:
2592 copyop = 'copy'
2592 copyop = 'copy'
2593 elif f in removedset:
2593 elif f in removedset:
2594 f2 = None
2594 f2 = None
2595 if opts.git:
2595 if opts.git:
2596 # have we already reported a copy above?
2596 # have we already reported a copy above?
2597 if (f in copyto and copyto[f] in addedset
2597 if (f in copyto and copyto[f] in addedset
2598 and copy[copyto[f]] == f):
2598 and copy[copyto[f]] == f):
2599 continue
2599 continue
2600 yield f1, f2, copyop
2600 yield f1, f2, copyop
2601
2601
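# Worked example (added for clarity): with modified=['m'], added=['b'],
# removed=['a'], copy={'b': 'a'} and opts.git set, _filepairs() yields
# ('a', 'b', 'rename') and ('m', 'm', None); the removed 'a' is folded
# into the rename and not reported separately.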
2602 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2602 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2603 copy, getfilectx, opts, losedatafn, prefix, relroot):
2603 copy, getfilectx, opts, losedatafn, prefix, relroot):
2604 '''given input data, generate a diff and yield it in blocks
2604 '''given input data, generate a diff and yield it in blocks
2605
2605
2606 If generating a diff would lose data like flags or binary data and
2606 If generating a diff would lose data like flags or binary data and
2607 losedatafn is not None, it will be called.
2607 losedatafn is not None, it will be called.
2608
2608
2609 relroot is removed and prefix is added to every path in the diff output.
2609 relroot is removed and prefix is added to every path in the diff output.
2610
2610
2611 If relroot is not empty, this function expects every path in modified,
2611 If relroot is not empty, this function expects every path in modified,
2612 added, removed and copy to start with it.'''
2612 added, removed and copy to start with it.'''
2613
2613
2614 def gitindex(text):
2614 def gitindex(text):
2615 if not text:
2615 if not text:
2616 text = ""
2616 text = ""
2617 l = len(text)
2617 l = len(text)
2618 s = hashlib.sha1('blob %d\0' % l)
2618 s = hashlib.sha1('blob %d\0' % l)
2619 s.update(text)
2619 s.update(text)
2620 return hex(s.digest())
2620 return hex(s.digest())
2621
2621
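# Note (added for clarity): gitindex() mirrors git's blob hashing,
# sha1('blob <len>\0' + data); for instance gitindex('') gives
# 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391', the well-known empty-blob id.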
2622 if opts.noprefix:
2622 if opts.noprefix:
2623 aprefix = bprefix = ''
2623 aprefix = bprefix = ''
2624 else:
2624 else:
2625 aprefix = 'a/'
2625 aprefix = 'a/'
2626 bprefix = 'b/'
2626 bprefix = 'b/'
2627
2627
2628 def diffline(f, revs):
2628 def diffline(f, revs):
2629 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2629 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2630 return 'diff %s %s' % (revinfo, f)
2630 return 'diff %s %s' % (revinfo, f)
2631
2631
2632 def isempty(fctx):
2632 def isempty(fctx):
2633 return fctx is None or fctx.size() == 0
2633 return fctx is None or fctx.size() == 0
2634
2634
2635 date1 = dateutil.datestr(ctx1.date())
2635 date1 = dateutil.datestr(ctx1.date())
2636 date2 = dateutil.datestr(ctx2.date())
2636 date2 = dateutil.datestr(ctx2.date())
2637
2637
2638 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2638 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2639
2639
2640 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2640 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2641 or repo.ui.configbool('devel', 'check-relroot')):
2641 or repo.ui.configbool('devel', 'check-relroot')):
2642 for f in modified + added + removed + list(copy) + list(copy.values()):
2642 for f in modified + added + removed + list(copy) + list(copy.values()):
2643 if f is not None and not f.startswith(relroot):
2643 if f is not None and not f.startswith(relroot):
2644 raise AssertionError(
2644 raise AssertionError(
2645 "file %s doesn't start with relroot %s" % (f, relroot))
2645 "file %s doesn't start with relroot %s" % (f, relroot))
2646
2646
2647 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2647 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2648 content1 = None
2648 content1 = None
2649 content2 = None
2649 content2 = None
2650 fctx1 = None
2650 fctx1 = None
2651 fctx2 = None
2651 fctx2 = None
2652 flag1 = None
2652 flag1 = None
2653 flag2 = None
2653 flag2 = None
2654 if f1:
2654 if f1:
2655 fctx1 = getfilectx(f1, ctx1)
2655 fctx1 = getfilectx(f1, ctx1)
2656 if opts.git or losedatafn:
2656 if opts.git or losedatafn:
2657 flag1 = ctx1.flags(f1)
2657 flag1 = ctx1.flags(f1)
2658 if f2:
2658 if f2:
2659 fctx2 = getfilectx(f2, ctx2)
2659 fctx2 = getfilectx(f2, ctx2)
2660 if opts.git or losedatafn:
2660 if opts.git or losedatafn:
2661 flag2 = ctx2.flags(f2)
2661 flag2 = ctx2.flags(f2)
2662 # if binary is True, output "summary" or "base85", but not "text diff"
2662 # if binary is True, output "summary" or "base85", but not "text diff"
2663 if opts.text:
2663 if opts.text:
2664 binary = False
2664 binary = False
2665 else:
2665 else:
2666 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2666 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2667
2667
2668 if losedatafn and not opts.git:
2668 if losedatafn and not opts.git:
2669 if (binary or
2669 if (binary or
2670 # copy/rename
2670 # copy/rename
2671 f2 in copy or
2671 f2 in copy or
2672 # empty file creation
2672 # empty file creation
2673 (not f1 and isempty(fctx2)) or
2673 (not f1 and isempty(fctx2)) or
2674 # empty file deletion
2674 # empty file deletion
2675 (isempty(fctx1) and not f2) or
2675 (isempty(fctx1) and not f2) or
2676 # create with flags
2676 # create with flags
2677 (not f1 and flag2) or
2677 (not f1 and flag2) or
2678 # change flags
2678 # change flags
2679 (f1 and f2 and flag1 != flag2)):
2679 (f1 and f2 and flag1 != flag2)):
2680 losedatafn(f2 or f1)
2680 losedatafn(f2 or f1)
2681
2681
2682 path1 = f1 or f2
2682 path1 = f1 or f2
2683 path2 = f2 or f1
2683 path2 = f2 or f1
2684 path1 = posixpath.join(prefix, path1[len(relroot):])
2684 path1 = posixpath.join(prefix, path1[len(relroot):])
2685 path2 = posixpath.join(prefix, path2[len(relroot):])
2685 path2 = posixpath.join(prefix, path2[len(relroot):])
2686 header = []
2686 header = []
2687 if opts.git:
2687 if opts.git:
2688 header.append('diff --git %s%s %s%s' %
2688 header.append('diff --git %s%s %s%s' %
2689 (aprefix, path1, bprefix, path2))
2689 (aprefix, path1, bprefix, path2))
2690 if not f1: # added
2690 if not f1: # added
2691 header.append('new file mode %s' % gitmode[flag2])
2691 header.append('new file mode %s' % gitmode[flag2])
2692 elif not f2: # removed
2692 elif not f2: # removed
2693 header.append('deleted file mode %s' % gitmode[flag1])
2693 header.append('deleted file mode %s' % gitmode[flag1])
2694 else: # modified/copied/renamed
2694 else: # modified/copied/renamed
2695 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2695 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2696 if mode1 != mode2:
2696 if mode1 != mode2:
2697 header.append('old mode %s' % mode1)
2697 header.append('old mode %s' % mode1)
2698 header.append('new mode %s' % mode2)
2698 header.append('new mode %s' % mode2)
2699 if copyop is not None:
2699 if copyop is not None:
2700 if opts.showsimilarity:
2700 if opts.showsimilarity:
2701 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2701 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2702 header.append('similarity index %d%%' % sim)
2702 header.append('similarity index %d%%' % sim)
2703 header.append('%s from %s' % (copyop, path1))
2703 header.append('%s from %s' % (copyop, path1))
2704 header.append('%s to %s' % (copyop, path2))
2704 header.append('%s to %s' % (copyop, path2))
2705 elif revs and not repo.ui.quiet:
2705 elif revs and not repo.ui.quiet:
2706 header.append(diffline(path1, revs))
2706 header.append(diffline(path1, revs))
2707
2707
2708 # fctx.is | diffopts | what to | is fctx.data()
2708 # fctx.is | diffopts | what to | is fctx.data()
2709 # binary() | text nobinary git index | output? | outputted?
2709 # binary() | text nobinary git index | output? | outputted?
2710 # ------------------------------------|----------------------------
2710 # ------------------------------------|----------------------------
2711 # yes | no no no * | summary | no
2711 # yes | no no no * | summary | no
2712 # yes | no no yes * | base85 | yes
2712 # yes | no no yes * | base85 | yes
2713 # yes | no yes no * | summary | no
2713 # yes | no yes no * | summary | no
2714 # yes | no yes yes 0 | summary | no
2714 # yes | no yes yes 0 | summary | no
2715 # yes | no yes yes >0 | summary | semi [1]
2715 # yes | no yes yes >0 | summary | semi [1]
2716 # yes | yes * * * | text diff | yes
2716 # yes | yes * * * | text diff | yes
2717 # no | * * * * | text diff | yes
2717 # no | * * * * | text diff | yes
2718 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2718 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2719 if binary and (not opts.git or (opts.git and opts.nobinary and not
2719 if binary and (not opts.git or (opts.git and opts.nobinary and not
2720 opts.index)):
2720 opts.index)):
2721 # fast path: no binary content will be displayed, content1 and
2721 # fast path: no binary content will be displayed, content1 and
2722 # content2 are only used for equivalent test. cmp() could have a
2722 # content2 are only used for equivalent test. cmp() could have a
2723 # fast path.
2723 # fast path.
2724 if fctx1 is not None:
2724 if fctx1 is not None:
2725 content1 = b'\0'
2725 content1 = b'\0'
2726 if fctx2 is not None:
2726 if fctx2 is not None:
2727 if fctx1 is not None and not fctx1.cmp(fctx2):
2727 if fctx1 is not None and not fctx1.cmp(fctx2):
2728 content2 = b'\0' # not different
2728 content2 = b'\0' # not different
2729 else:
2729 else:
2730 content2 = b'\0\0'
2730 content2 = b'\0\0'
2731 else:
2731 else:
2732 # normal path: load contents
2732 # normal path: load contents
2733 if fctx1 is not None:
2733 if fctx1 is not None:
2734 content1 = fctx1.data()
2734 content1 = fctx1.data()
2735 if fctx2 is not None:
2735 if fctx2 is not None:
2736 content2 = fctx2.data()
2736 content2 = fctx2.data()
2737
2737
2738 if binary and opts.git and not opts.nobinary:
2738 if binary and opts.git and not opts.nobinary:
2739 text = mdiff.b85diff(content1, content2)
2739 text = mdiff.b85diff(content1, content2)
2740 if text:
2740 if text:
2741 header.append('index %s..%s' %
2741 header.append('index %s..%s' %
2742 (gitindex(content1), gitindex(content2)))
2742 (gitindex(content1), gitindex(content2)))
2743 hunks = (None, [text]),
2743 hunks = (None, [text]),
2744 else:
2744 else:
2745 if opts.git and opts.index > 0:
2745 if opts.git and opts.index > 0:
2746 flag = flag1
2746 flag = flag1
2747 if flag is None:
2747 if flag is None:
2748 flag = flag2
2748 flag = flag2
2749 header.append('index %s..%s %s' %
2749 header.append('index %s..%s %s' %
2750 (gitindex(content1)[0:opts.index],
2750 (gitindex(content1)[0:opts.index],
2751 gitindex(content2)[0:opts.index],
2751 gitindex(content2)[0:opts.index],
2752 gitmode[flag]))
2752 gitmode[flag]))
2753
2753
2754 uheaders, hunks = mdiff.unidiff(content1, date1,
2754 uheaders, hunks = mdiff.unidiff(content1, date1,
2755 content2, date2,
2755 content2, date2,
2756 path1, path2,
2756 path1, path2,
2757 binary=binary, opts=opts)
2757 binary=binary, opts=opts)
2758 header.extend(uheaders)
2758 header.extend(uheaders)
2759 yield fctx1, fctx2, header, hunks
2759 yield fctx1, fctx2, header, hunks
2760
2760
2761 def diffstatsum(stats):
2761 def diffstatsum(stats):
2762 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2762 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2763 for f, a, r, b in stats:
2763 for f, a, r, b in stats:
2764 maxfile = max(maxfile, encoding.colwidth(f))
2764 maxfile = max(maxfile, encoding.colwidth(f))
2765 maxtotal = max(maxtotal, a + r)
2765 maxtotal = max(maxtotal, a + r)
2766 addtotal += a
2766 addtotal += a
2767 removetotal += r
2767 removetotal += r
2768 binary = binary or b
2768 binary = binary or b
2769
2769
2770 return maxfile, maxtotal, addtotal, removetotal, binary
2770 return maxfile, maxtotal, addtotal, removetotal, binary
2771
2771
2772 def diffstatdata(lines):
2772 def diffstatdata(lines):
2773 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2773 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2774
2774
2775 results = []
2775 results = []
2776 filename, adds, removes, isbinary = None, 0, 0, False
2776 filename, adds, removes, isbinary = None, 0, 0, False
2777
2777
2778 def addresult():
2778 def addresult():
2779 if filename:
2779 if filename:
2780 results.append((filename, adds, removes, isbinary))
2780 results.append((filename, adds, removes, isbinary))
2781
2781
2782 # inheader is used to track if a line is in the
2782 # inheader is used to track if a line is in the
2783 # header portion of the diff. This helps properly account
2783 # header portion of the diff. This helps properly account
2784 # for lines that start with '--' or '++'
2784 # for lines that start with '--' or '++'
2785 inheader = False
2785 inheader = False
2786
2786
2787 for line in lines:
2787 for line in lines:
2788 if line.startswith('diff'):
2788 if line.startswith('diff'):
2789 addresult()
2789 addresult()
2790 # starting a new file diff
2790 # starting a new file diff
2791 # set numbers to 0 and reset inheader
2791 # set numbers to 0 and reset inheader
2792 inheader = True
2792 inheader = True
2793 adds, removes, isbinary = 0, 0, False
2793 adds, removes, isbinary = 0, 0, False
2794 if line.startswith('diff --git a/'):
2794 if line.startswith('diff --git a/'):
2795 filename = gitre.search(line).group(2)
2795 filename = gitre.search(line).group(2)
2796 elif line.startswith('diff -r'):
2796 elif line.startswith('diff -r'):
2797 # format: "diff -r ... -r ... filename"
2797 # format: "diff -r ... -r ... filename"
2798 filename = diffre.search(line).group(1)
2798 filename = diffre.search(line).group(1)
2799 elif line.startswith('@@'):
2799 elif line.startswith('@@'):
2800 inheader = False
2800 inheader = False
2801 elif line.startswith('+') and not inheader:
2801 elif line.startswith('+') and not inheader:
2802 adds += 1
2802 adds += 1
2803 elif line.startswith('-') and not inheader:
2803 elif line.startswith('-') and not inheader:
2804 removes += 1
2804 removes += 1
2805 elif (line.startswith('GIT binary patch') or
2805 elif (line.startswith('GIT binary patch') or
2806 line.startswith('Binary file')):
2806 line.startswith('Binary file')):
2807 isbinary = True
2807 isbinary = True
2808 addresult()
2808 addresult()
2809 return results
2809 return results
2810
2810
2811 def diffstat(lines, width=80):
2811 def diffstat(lines, width=80):
2812 output = []
2812 output = []
2813 stats = diffstatdata(lines)
2813 stats = diffstatdata(lines)
2814 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2814 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2815
2815
2816 countwidth = len(str(maxtotal))
2816 countwidth = len(str(maxtotal))
2817 if hasbinary and countwidth < 3:
2817 if hasbinary and countwidth < 3:
2818 countwidth = 3
2818 countwidth = 3
2819 graphwidth = width - countwidth - maxname - 6
2819 graphwidth = width - countwidth - maxname - 6
2820 if graphwidth < 10:
2820 if graphwidth < 10:
2821 graphwidth = 10
2821 graphwidth = 10
2822
2822
2823 def scale(i):
2823 def scale(i):
2824 if maxtotal <= graphwidth:
2824 if maxtotal <= graphwidth:
2825 return i
2825 return i
2826 # If diffstat runs out of room it doesn't print anything,
2826 # If diffstat runs out of room it doesn't print anything,
2827 # which isn't very useful, so always print at least one + or -
2827 # which isn't very useful, so always print at least one + or -
2828 # if there were at least some changes.
2828 # if there were at least some changes.
2829 return max(i * graphwidth // maxtotal, int(bool(i)))
2829 return max(i * graphwidth // maxtotal, int(bool(i)))
2830
2830
2831 for filename, adds, removes, isbinary in stats:
2831 for filename, adds, removes, isbinary in stats:
2832 if isbinary:
2832 if isbinary:
2833 count = 'Bin'
2833 count = 'Bin'
2834 else:
2834 else:
2835 count = '%d' % (adds + removes)
2835 count = '%d' % (adds + removes)
2836 pluses = '+' * scale(adds)
2836 pluses = '+' * scale(adds)
2837 minuses = '-' * scale(removes)
2837 minuses = '-' * scale(removes)
2838 output.append(' %s%s | %*s %s%s\n' %
2838 output.append(' %s%s | %*s %s%s\n' %
2839 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2839 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2840 countwidth, count, pluses, minuses))
2840 countwidth, count, pluses, minuses))
2841
2841
2842 if stats:
2842 if stats:
2843 output.append(_(' %d files changed, %d insertions(+), '
2843 output.append(_(' %d files changed, %d insertions(+), '
2844 '%d deletions(-)\n')
2844 '%d deletions(-)\n')
2845 % (len(stats), totaladds, totalremoves))
2845 % (len(stats), totaladds, totalremoves))
2846
2846
2847 return ''.join(output)
2847 return ''.join(output)
2848
2848
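# Illustrative output (a sketch, not from the original source): for a
# single file with two added and one removed line, diffstat() renders
# something like
#
#      some/file.py |  3 ++-
#      1 files changed, 2 insertions(+), 1 deletions(-)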
2849 def diffstatui(*args, **kw):
2849 def diffstatui(*args, **kw):
2850 '''like diffstat(), but yields 2-tuples of (output, label) for
2850 '''like diffstat(), but yields 2-tuples of (output, label) for
2851 ui.write()
2851 ui.write()
2852 '''
2852 '''
2853
2853
2854 for line in diffstat(*args, **kw).splitlines():
2854 for line in diffstat(*args, **kw).splitlines():
2855 if line and line[-1] in '+-':
2855 if line and line[-1] in '+-':
2856 name, graph = line.rsplit(' ', 1)
2856 name, graph = line.rsplit(' ', 1)
2857 yield (name + ' ', '')
2857 yield (name + ' ', '')
2858 m = re.search(br'\++', graph)
2858 m = re.search(br'\++', graph)
2859 if m:
2859 if m:
2860 yield (m.group(0), 'diffstat.inserted')
2860 yield (m.group(0), 'diffstat.inserted')
2861 m = re.search(br'-+', graph)
2861 m = re.search(br'-+', graph)
2862 if m:
2862 if m:
2863 yield (m.group(0), 'diffstat.deleted')
2863 yield (m.group(0), 'diffstat.deleted')
2864 else:
2864 else:
2865 yield (line, '')
2865 yield (line, '')
2866 yield ('\n', '')
2866 yield ('\n', '')
@@ -1,2250 +1,2250 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 dagop,
14 dagop,
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 obsutil,
22 obsutil,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 pycompat,
25 pycompat,
26 registrar,
26 registrar,
27 repoview,
27 repoview,
28 revsetlang,
28 revsetlang,
29 scmutil,
29 scmutil,
30 smartset,
30 smartset,
31 stack as stackmod,
31 stack as stackmod,
32 util,
32 util,
33 )
33 )
34 from .utils import (
34 from .utils import (
35 dateutil,
35 dateutil,
36 diffutil,
36 diffutil,
37 stringutil,
37 stringutil,
38 )
38 )
39
39
40 # helpers for processing parsed tree
40 # helpers for processing parsed tree
41 getsymbol = revsetlang.getsymbol
41 getsymbol = revsetlang.getsymbol
42 getstring = revsetlang.getstring
42 getstring = revsetlang.getstring
43 getinteger = revsetlang.getinteger
43 getinteger = revsetlang.getinteger
44 getboolean = revsetlang.getboolean
44 getboolean = revsetlang.getboolean
45 getlist = revsetlang.getlist
45 getlist = revsetlang.getlist
46 getrange = revsetlang.getrange
46 getrange = revsetlang.getrange
47 getargs = revsetlang.getargs
47 getargs = revsetlang.getargs
48 getargsdict = revsetlang.getargsdict
48 getargsdict = revsetlang.getargsdict
49
49
50 baseset = smartset.baseset
50 baseset = smartset.baseset
51 generatorset = smartset.generatorset
51 generatorset = smartset.generatorset
52 spanset = smartset.spanset
52 spanset = smartset.spanset
53 fullreposet = smartset.fullreposet
53 fullreposet = smartset.fullreposet
54
54
55 # Constants for ordering requirement, used in getset():
55 # Constants for ordering requirement, used in getset():
56 #
56 #
57 # If 'define', any nested functions and operations MAY change the ordering of
57 # If 'define', any nested functions and operations MAY change the ordering of
58 # the entries in the set (but if changes the ordering, it MUST ALWAYS change
58 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
58 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
59 # it). If 'follow', any nested functions and operations MUST take the ordering
60 # specified by the first operand to the '&' operator.
60 # specified by the first operand to the '&' operator.
61 #
61 #
62 # For instance,
62 # For instance,
63 #
63 #
64 # X & (Y | Z)
64 # X & (Y | Z)
65 # ^ ^^^^^^^
65 # ^ ^^^^^^^
66 # | follow
66 # | follow
67 # define
67 # define
68 #
68 #
69 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
69 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
70 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
70 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
71 #
71 #
72 # 'any' means the order doesn't matter. For instance,
72 # 'any' means the order doesn't matter. For instance,
73 #
73 #
74 # (X & !Y) | ancestors(Z)
74 # (X & !Y) | ancestors(Z)
75 # ^ ^
75 # ^ ^
76 # any any
76 # any any
77 #
77 #
78 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
78 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
79 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
79 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
80 # since 'ancestors' does not care about the order of its argument.
80 # since 'ancestors' does not care about the order of its argument.
81 #
81 #
82 # Currently, most revsets do not care about the order, so 'define' is
82 # Currently, most revsets do not care about the order, so 'define' is
83 # equivalent to 'follow' for them, and the resulting order is based on the
83 # equivalent to 'follow' for them, and the resulting order is based on the
84 # 'subset' parameter passed down to them:
84 # 'subset' parameter passed down to them:
85 #
85 #
86 # m = revset.match(...)
86 # m = revset.match(...)
87 # m(repo, subset, order=defineorder)
87 # m(repo, subset, order=defineorder)
88 # ^^^^^^
88 # ^^^^^^
89 # For most revsets, 'define' means using the order this subset provides
89 # For most revsets, 'define' means using the order this subset provides
90 #
90 #
91 # There are a few revsets that always redefine the order if 'define' is
91 # There are a few revsets that always redefine the order if 'define' is
92 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
92 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
93 anyorder = 'any' # don't care about the order; could even be random-shuffled
93 anyorder = 'any' # don't care about the order; could even be random-shuffled
94 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
94 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
95 followorder = 'follow' # MUST follow the current order
95 followorder = 'follow' # MUST follow the current order
96
96
97 # helpers
97 # helpers
98
98
99 def getset(repo, subset, x, order=defineorder):
99 def getset(repo, subset, x, order=defineorder):
100 if not x:
100 if not x:
101 raise error.ParseError(_("missing argument"))
101 raise error.ParseError(_("missing argument"))
102 return methods[x[0]](repo, subset, *x[1:], order=order)
102 return methods[x[0]](repo, subset, *x[1:], order=order)
103
103
104 def _getrevsource(repo, r):
104 def _getrevsource(repo, r):
105 extra = repo[r].extra()
105 extra = repo[r].extra()
106 for label in ('source', 'transplant_source', 'rebase_source'):
106 for label in ('source', 'transplant_source', 'rebase_source'):
107 if label in extra:
107 if label in extra:
108 try:
108 try:
109 return repo[extra[label]].rev()
109 return repo[extra[label]].rev()
110 except error.RepoLookupError:
110 except error.RepoLookupError:
111 pass
111 pass
112 return None
112 return None
113
113
114 def _sortedb(xs):
114 def _sortedb(xs):
115 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
115 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
116
116
117 # operator methods
117 # operator methods
118
118
119 def stringset(repo, subset, x, order):
119 def stringset(repo, subset, x, order):
120 if not x:
120 if not x:
121 raise error.ParseError(_("empty string is not a valid revision"))
121 raise error.ParseError(_("empty string is not a valid revision"))
122 x = scmutil.intrev(scmutil.revsymbol(repo, x))
122 x = scmutil.intrev(scmutil.revsymbol(repo, x))
123 if (x in subset
123 if (x in subset
124 or x == node.nullrev and isinstance(subset, fullreposet)):
124 or x == node.nullrev and isinstance(subset, fullreposet)):
125 return baseset([x])
125 return baseset([x])
126 return baseset()
126 return baseset()
127
127
128 def rangeset(repo, subset, x, y, order):
128 def rangeset(repo, subset, x, y, order):
129 m = getset(repo, fullreposet(repo), x)
129 m = getset(repo, fullreposet(repo), x)
130 n = getset(repo, fullreposet(repo), y)
130 n = getset(repo, fullreposet(repo), y)
131
131
132 if not m or not n:
132 if not m or not n:
133 return baseset()
133 return baseset()
134 return _makerangeset(repo, subset, m.first(), n.last(), order)
134 return _makerangeset(repo, subset, m.first(), n.last(), order)
135
135
136 def rangeall(repo, subset, x, order):
136 def rangeall(repo, subset, x, order):
137 assert x is None
137 assert x is None
138 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
138 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
139
139
140 def rangepre(repo, subset, y, order):
140 def rangepre(repo, subset, y, order):
141 # ':y' can't be rewritten to '0:y' since '0' may be hidden
141 # ':y' can't be rewritten to '0:y' since '0' may be hidden
142 n = getset(repo, fullreposet(repo), y)
142 n = getset(repo, fullreposet(repo), y)
143 if not n:
143 if not n:
144 return baseset()
144 return baseset()
145 return _makerangeset(repo, subset, 0, n.last(), order)
145 return _makerangeset(repo, subset, 0, n.last(), order)
146
146
147 def rangepost(repo, subset, x, order):
147 def rangepost(repo, subset, x, order):
148 m = getset(repo, fullreposet(repo), x)
148 m = getset(repo, fullreposet(repo), x)
149 if not m:
149 if not m:
150 return baseset()
150 return baseset()
151 return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
151 return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
152 order)
152 order)
153
153
154 def _makerangeset(repo, subset, m, n, order):
154 def _makerangeset(repo, subset, m, n, order):
155 if m == n:
155 if m == n:
156 r = baseset([m])
156 r = baseset([m])
157 elif n == node.wdirrev:
157 elif n == node.wdirrev:
158 r = spanset(repo, m, len(repo)) + baseset([n])
158 r = spanset(repo, m, len(repo)) + baseset([n])
159 elif m == node.wdirrev:
159 elif m == node.wdirrev:
160 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
160 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
161 elif m < n:
161 elif m < n:
162 r = spanset(repo, m, n + 1)
162 r = spanset(repo, m, n + 1)
163 else:
163 else:
164 r = spanset(repo, m, n - 1)
164 r = spanset(repo, m, n - 1)
165
165
166 if order == defineorder:
166 if order == defineorder:
167 return r & subset
167 return r & subset
168 else:
168 else:
169 # carrying the sorting over when possible would be more efficient
169 # carrying the sorting over when possible would be more efficient
170 return subset & r
170 return subset & r
171
171
172 def dagrange(repo, subset, x, y, order):
172 def dagrange(repo, subset, x, y, order):
173 r = fullreposet(repo)
173 r = fullreposet(repo)
174 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
174 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
175 includepath=True)
175 includepath=True)
176 return subset & xs
176 return subset & xs
177
177
178 def andset(repo, subset, x, y, order):
178 def andset(repo, subset, x, y, order):
179 if order == anyorder:
179 if order == anyorder:
180 yorder = anyorder
180 yorder = anyorder
181 else:
181 else:
182 yorder = followorder
182 yorder = followorder
183 return getset(repo, getset(repo, subset, x, order), y, yorder)
183 return getset(repo, getset(repo, subset, x, order), y, yorder)
184
184
185 def andsmallyset(repo, subset, x, y, order):
185 def andsmallyset(repo, subset, x, y, order):
186 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
186 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
187 if order == anyorder:
187 if order == anyorder:
188 yorder = anyorder
188 yorder = anyorder
189 else:
189 else:
190 yorder = followorder
190 yorder = followorder
191 return getset(repo, getset(repo, subset, y, yorder), x, order)
191 return getset(repo, getset(repo, subset, y, yorder), x, order)
192
192
193 def differenceset(repo, subset, x, y, order):
193 def differenceset(repo, subset, x, y, order):
194 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
194 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
195
195
196 def _orsetlist(repo, subset, xs, order):
196 def _orsetlist(repo, subset, xs, order):
197 assert xs
197 assert xs
198 if len(xs) == 1:
198 if len(xs) == 1:
199 return getset(repo, subset, xs[0], order)
199 return getset(repo, subset, xs[0], order)
200 p = len(xs) // 2
200 p = len(xs) // 2
201 a = _orsetlist(repo, subset, xs[:p], order)
201 a = _orsetlist(repo, subset, xs[:p], order)
202 b = _orsetlist(repo, subset, xs[p:], order)
202 b = _orsetlist(repo, subset, xs[p:], order)
203 return a + b
203 return a + b
204
204
205 def orset(repo, subset, x, order):
205 def orset(repo, subset, x, order):
206 xs = getlist(x)
206 xs = getlist(x)
207 if not xs:
207 if not xs:
208 return baseset()
208 return baseset()
209 if order == followorder:
209 if order == followorder:
210 # slow path to take the subset order
210 # slow path to take the subset order
211 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
211 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
212 else:
212 else:
213 return _orsetlist(repo, subset, xs, order)
213 return _orsetlist(repo, subset, xs, order)
214
214
215 def notset(repo, subset, x, order):
215 def notset(repo, subset, x, order):
216 return subset - getset(repo, subset, x, anyorder)
216 return subset - getset(repo, subset, x, anyorder)
217
217
218 def relationset(repo, subset, x, y, order):
218 def relationset(repo, subset, x, y, order):
219 raise error.ParseError(_("can't use a relation in this context"))
219 raise error.ParseError(_("can't use a relation in this context"))
220
220
221 def relsubscriptset(repo, subset, x, y, z, order):
221 def relsubscriptset(repo, subset, x, y, z, order):
222 # this is pretty basic implementation of 'x#y[z]' operator, still
222 # this is a pretty basic implementation of the 'x#y[z]' operator, still
222 # this is a pretty basic implementation of the 'x#y[z]' operator, still
223 # experimental, so undocumented; see the wiki for further ideas.
223 # experimental, so undocumented; see the wiki for further ideas.
224 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
225 rel = getsymbol(y)
225 rel = getsymbol(y)
226 n = getinteger(z, _("relation subscript must be an integer"))
226 n = getinteger(z, _("relation subscript must be an integer"))
227
227
228 # TODO: perhaps this should be a table of relation functions
228 # TODO: perhaps this should be a table of relation functions
229 if rel in ('g', 'generations'):
229 if rel in ('g', 'generations'):
230 # TODO: support range, rewrite tests, and drop startdepth argument
230 # TODO: support range, rewrite tests, and drop startdepth argument
231 # from ancestors() and descendants() predicates
231 # from ancestors() and descendants() predicates
232 if n <= 0:
232 if n <= 0:
233 n = -n
233 n = -n
234 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
234 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
235 else:
235 else:
236 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
236 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
237
237
238 raise error.UnknownIdentifier(rel, ['generations'])
238 raise error.UnknownIdentifier(rel, ['generations'])
239
239
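# Examples (added for clarity) for the experimental 'x#y[z]' operator
# handled above: 'x#generations[0]' is x itself, 'x#generations[-2]' its
# ancestors exactly two generations back (grandparents), and
# 'x#generations[1]' its children.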
def subscriptset(repo, subset, x, y, order):
    raise error.ParseError(_("can't use a subscript in this context"))

def listset(repo, subset, *xs, **opts):
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v, order):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b, order):
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)

# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = revsetlang.symbols

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

predicate = registrar.revsetpredicate()

@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean')
    return subset & baseset([destutil.destupdate(repo,
                            **pycompat.strkwargs(args))[0]])

@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])

@predicate('adds(pattern)', safe=True, weight=30)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    return checkstatus(repo, subset, pat, 1)

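# Illustrative usage (not part of the original source):
#   hg log -r 'adds("docs/")'
# lists the changesets that added a file under the docs/ directory, the
# bare pattern being interpreted relative to the current directory.
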
@predicate('ancestor(*changeset)', safe=True, weight=0.5)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Returns an empty list when passed no arguments.
    The greatest common ancestor of a single changeset is that changeset.
    """
    reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
    try:
        anc = repo[next(reviter)]
    except StopIteration:
        return baseset()
    for r in reviter:
        anc = anc.ancestor(repo[r])

    r = scmutil.intrev(anc)
    if r in subset:
        return baseset([r])
    return baseset()

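# Illustrative usage (not part of the original source): given two divergent
# heads named, say, rev1 and rev2,
#   hg log -r 'ancestor(rev1, rev2)'
# resolves to their greatest common ancestor; with a single argument the
# result is that revision itself.
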
def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
               stopdepth=None):
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
    return subset & s

@predicate('ancestors(set[, depth])', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'ancestors', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "ancestors" is a keyword
        raise error.ParseError(_('ancestors takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        n = getinteger(args['startdepth'],
                       "ancestors expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "ancestors" is a keyword
        n = getinteger(args['depth'], _("ancestors expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        stopdepth = n + 1
    return _ancestors(repo, subset, args['set'],
                      startdepth=startdepth, stopdepth=stopdepth)

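# Illustrative usage (not part of the original source):
#   hg log -r 'ancestors(tip, 2)'
# returns tip, its parents, and its grandparents (generations 0 through 2);
# the still-internal startdepth argument would additionally skip the
# nearest generations.
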
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def _childrenspec(repo, subset, x, n, order):
    """Changesets that are the Nth child of a changeset
    in set.
    """
    cs = set()
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            c = repo[r].children()
            if len(c) == 0:
                break
            if len(c) > 1:
                raise error.RepoLookupError(
                    _("revision in set has more than one child"))
            r = c[0].rev()
        else:
            cs.add(r)
    return subset & cs

def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    n = getinteger(n, _("~ expects a number"))
    if n < 0:
        # children lookup
        return _childrenspec(repo, subset, x, -n, order)
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            try:
                r = cl.parentrevs(r)[0]
            except error.WdirUnsupported:
                r = repo[r].parents()[0].rev()
        ps.add(r)
    return subset & ps

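# Illustrative usage (not part of the original source): '.~3' resolves to
# the third first-parent ancestor of the working directory parent, while a
# negative count such as '.~-1' performs the inverse children lookup.
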
@predicate('author(string)', safe=True, weight=10)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = getstring(x, _("author requires a string"))
    kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
    return subset.filter(lambda x: matcher(repo[x].user()),
                         condrepr=('<user %r>', n))

@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``        : csets topologically good/bad
    - ``range``                  : csets taking part in the bisection
    - ``pruned``                 : csets that are goods, bads or skipped
    - ``untested``               : csets whose fate is yet unknown
    - ``ignored``                : csets ignored due to DAG topology
    - ``current``                : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

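# Illustrative usage (not part of the original source):
#   hg log -r 'bisect(untested)'
# shows the changesets that the current bisection has not yet classified.
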
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = stringutil.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
    bms -= {node.nullrev}
    return subset & bms

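# Illustrative usage (not part of the original source):
#   hg log -r 'bookmark("re:feature/.*")'
# selects the revisions carried by every bookmark whose name matches the
# regular expression; a bare 'bookmark()' selects all bookmarked revisions.
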
@predicate('branch(string or set)', safe=True, weight=10)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo
    def getbranch(r):
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            return repo[r].branch()

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = stringutil.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbranch(r)),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbranch(r)),
                                 condrepr=('<branch %r>', b))

    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbranch(r) in b,
                         condrepr=lambda: '<branch %r>' % _sortedb(b))

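# Illustrative usage (not part of the original source):
#   hg log -r 'branch(default)'
# matches every changeset on the default branch, whereas 'branch(tip)'
# takes the revspec fallback and selects all changesets on tip's branch.
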
@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `phasedivergent`.
    (EXPERIMENTAL)
    """
    # i18n: "phasedivergent" is a keyword
    getargs(x, 0, 0, _("phasedivergent takes no arguments"))
    phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
    return subset & phasedivergent

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    # 'field' indexes the tuple returned by repo.status(): 0 selects
    # modified files, 1 added files and 2 removed files.
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset:
            cs.add(r)
        if p2 != nullrev and p2 in parentset:
            cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True, weight=10)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)', weight=100)
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

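# Illustrative usage (not part of the original source):
#   hg log -r 'contains("setup.py")'
# finds every revision whose manifest carries setup.py, whether or not the
# revision itself touched that file.
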
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True, weight=10)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = dateutil.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

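# Illustrative usage (not part of the original source):
#   hg log -r 'date(">2018-01-01")'
# keeps only changesets committed after that day; any interval syntax
# understood by :hg:`help dates` works here.
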
@predicate('desc(string)', safe=True, weight=10)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))

    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    return subset.filter(lambda r: matcher(repo[r].description()),
                         condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False, startdepth=None,
                 stopdepth=None):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
    return subset & s

@predicate('descendants(set[, depth])', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'descendants', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "descendants" is a keyword
        raise error.ParseError(_('descendants takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        n = getinteger(args['startdepth'],
                       "descendants expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "descendants" is a keyword
        n = getinteger(args['depth'], _("descendants expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        stopdepth = n + 1
    return _descendants(repo, subset, args['set'],
                        startdepth=startdepth, stopdepth=stopdepth)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True, weight=10)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % _sortedb(dests))

@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final
    successors. (EXPERIMENTAL)
    """
    # i18n: "contentdivergent" is a keyword
    getargs(x, 0, 0, _("contentdivergent takes no arguments"))
    contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
    return subset & contentdivergent

@predicate('extdata(source)', safe=False, weight=100)
def extdata(repo, subset, x):
    """Changesets in the specified extdata source. (EXPERIMENTAL)"""
    # i18n: "extdata" is a keyword
    args = getargsdict(x, 'extdata', 'source')
    source = getstring(args.get('source'),
                       # i18n: "extdata" is a keyword
                       _('extdata takes at least 1 string argument'))
    data = scmutil.extdatasource(repo, source)
    return subset & baseset(data)

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = stringutil.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))

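# Illustrative usage (not part of the original source, and 'source' is just
# an example label):
#   hg log -r 'extra(source, "re:[0-9a-f]{40}")'
# finds changesets whose extra metadata records, under the 'source' label,
# a value matching a full 40-digit hexadecimal hash.
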
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s

@predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
def first(repo, subset, x, order):
    """An alias for limit().
    """
    return limit(repo, subset, x, order)

def _follow(repo, subset, x, name, followfirst=False):
    args = getargsdict(x, name, 'file startrev')
    revs = None
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
    if 'file' in args:
        x = getstring(args['file'], _("%s expected a pattern") % name)
        if revs is None:
            revs = [None]
        fctxs = []
        for r in revs:
            ctx = mctx = repo[r]
            if r is None:
                ctx = repo['.']
            m = matchmod.match(repo.root, repo.getcwd(), [x],
                               ctx=mctx, default='path')
            fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
        s = dagop.filerevancestors(fctxs, followfirst)
    else:
        if revs is None:
            revs = baseset([repo['.'].rev()])
        s = dagop.revancestors(repo, revs, followfirst)

    return subset & s

@predicate('follow([file[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If file pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([file[, startrev]])``
    # Like ``follow([file[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

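# Illustrative usage (not part of the original source):
#   hg log -r 'follow(README, tip)'
# follows the history of README, including copies, starting from tip; a
# bare 'follow()' is simply '::.'.
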
@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, the working
    directory's parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    # i18n: "followlines" is a keyword
    msg = _("followlines expects exactly one file")
    fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
    # i18n: "followlines" is a keyword
    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    fromline, toline = util.processlinerange(fromline, toline)

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    if descend:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs

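# Illustrative usage (not part of the original source):
#   hg log -r 'followlines(mercurial/revset.py, 100:120)'
# walks the ancestors of the working directory parent that touched lines
# 100-120 of that file; passing descend=True walks toward the descendants
# of startrev instead.
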
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo)  # drop "null" if any

@predicate('grep(regex)', weight=10)
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(
            _('invalid match pattern: %s') % stringutil.forcebytestr(e))

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))

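# Illustrative usage (not part of the original source):
#   hg log -r 'grep(r"\bissue\d+\b")'
# matches the regex case-sensitively against each changeset's file names,
# user, and description, unlike the case-insensitive keyword() predicate.
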
1032 @predicate('_matchfiles', safe=True)
1032 @predicate('_matchfiles', safe=True)
1033 def _matchfiles(repo, subset, x):
1033 def _matchfiles(repo, subset, x):
1034 # _matchfiles takes a revset list of prefixed arguments:
1034 # _matchfiles takes a revset list of prefixed arguments:
1035 #
1035 #
1036 # [p:foo, i:bar, x:baz]
1036 # [p:foo, i:bar, x:baz]
1037 #
1037 #
1038 # builds a match object from them and filters subset. Allowed
1038 # builds a match object from them and filters subset. Allowed
1039 # prefixes are 'p:' for regular patterns, 'i:' for include
1039 # prefixes are 'p:' for regular patterns, 'i:' for include
1040 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1040 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, defaulting
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value == '': # empty means working directory
                rev = node.wdirrev
            else:
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'
    hasset = any(matchmod.patkind(p) == 'set' for p in pats + inc + exc)

    mcache = [None]

    # This directly reads the changelog data, as creating a changectx for
    # every revision would be quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)

        if not mcache[0] or (hasset and rev is None):
            r = x if rev is None else rev
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), pats,
                                       include=inc, exclude=exc, ctx=repo[r],
                                       default=default)
        m = mcache[0]

        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
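
# An illustrative sketch, not part of the original module: each _matchfiles
# argument packs a two-character prefix and a value into a single string, so
# decoding is a plain slice, exactly as the loop above does (the pattern
# value here is a hypothetical example):
#
#   >>> s = 'p:glob:**.py'
#   >>> s[:2], s[2:]
#   ('p:', 'glob:**.py')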

@predicate('file(pattern)', safe=True, weight=10)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
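
# As a usage sketch (an assumed invocation, not exercised here), the
# predicate above backs revset queries such as:
#
#   hg log -r "file('glob:**.py')"
#
# which reaches _matchfiles(repo, subset, ('string', 'p:glob:**.py')).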

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

@predicate('heads(set)', safe=True, takeorder=True)
def heads(repo, subset, x, order):
    """Members of set with no children in set.
    """
    # argument set should never define order
    if order == defineorder:
        order = followorder
    s = getset(repo, subset, x, order=order)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True, weight=10)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case-sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    ls = os.slice(ofs, ofs + lim)
    if order == followorder and lim > 1:
        return subset & ls
    return ls & subset
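
# A minimal sketch of the slicing semantics above, with a plain list standing
# in for a smartset (an assumption for illustration only):
#
#   >>> revs = list(range(10))
#   >>> ofs, lim = 3, 2
#   >>> revs[ofs:ofs + lim]      # what os.slice(ofs, ofs + lim) selects
#   [3, 4]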

@predicate('last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    ls = os.slice(0, lim)
    if order == followorder and lim > 1:
        return subset & ls
    ls.reverse()
    return ls & subset
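
# A minimal sketch of the reverse/slice/reverse dance above, again using a
# plain list in place of a smartset (an illustrative assumption):
#
#   >>> revs = [0, 1, 2, 3, 4]
#   >>> tail = list(reversed(revs))[:2]   # take the last two members
#   >>> tail.reverse()                    # restore the original order
#   >>> tail
#   [3, 4]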

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
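
# A small sketch of the counting logic above on a hypothetical three-revision
# graph (tuples are (p1, p2), with -1 meaning "no parent"):
#
#   >>> parentrevs = {1: (0, -1), 2: (0, -1), 3: (1, 2)}
#   >>> counts = {}
#   >>> for r in sorted(parentrevs):
#   ...     for p in parentrevs[r]:
#   ...         if p >= 0:
#   ...             counts[p] = counts.get(p, 0) + 1
#   >>> counts[0]   # rev 0 has two children, so it is a branchpoint
#   2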

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True, weight=30)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = stringutil.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
    if not namespaces:
        raise error.RepoLookupError(_("no namespace exists"
                                      " that matches '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= {node.nullrev}
    return subset & names

@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except error.WdirUnsupported:
            rn = node.wdirrev
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        try:
            pm = scmutil.resolvehexnodeidprefix(repo, n)
            if pm is not None:
                rn = repo.changelog.rev(pm)
        except LookupError:
            pass
        except error.WdirUnsupported:
            rn = node.wdirrev

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
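
# The lookup above is two-tiered: a 40-character string is treated as a full
# hex node id, while anything shorter goes through unambiguous-prefix
# resolution. A sketch with a hypothetical hash:
#
#   >>> full = '1f0dee641bb7258c56bd60e93edfa2405381c41e'
#   >>> len(full)
#   40
#   >>> full.startswith('1f0dee6')   # a short prefix can still identify it
#   True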

@predicate('none()', safe=True)
def none(repo, subset, x):
    """No changesets.
    """
    # i18n: "none" is a keyword
    getargs(x, 0, 0, _("none takes no arguments"))
    return baseset()

@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
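
# The two-argument form above is ::<set1> - ::<set2>, i.e. an ancestor-set
# difference. A sketch with hypothetical ancestor sets:
#
#   >>> ancestors_of_include = {0, 1, 2, 5}
#   >>> ancestors_of_exclude = {0, 1, 3}
#   >>> sorted(ancestors_of_include - ancestors_of_exclude)
#   [2, 5]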

@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = {_firstsrc(r) for r in dests}
    o -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o
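
# _firstsrc walks the graft/transplant/rebase source chain back to its start.
# A sketch with a hypothetical rev -> source mapping standing in for
# _getrevsource:
#
#   >>> source = {30: 20, 20: 10}
#   >>> r = 30
#   >>> while r in source:
#   ...     r = source[r]
#   >>> r
#   10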

@predicate('outgoing([path])', safe=False, weight=10)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    if not dest:
        # ui.paths.getpath() explicitly tests for None, not just a boolean
        dest = None
    path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, []

    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = {cl.rev(r) for r in outgoing.missing}
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[0])
        except error.WdirUnsupported:
            ps.add(repo[r].parents()[0].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                # add the revision number, not the changectx itself, to
                # match what the cl.parentrevs() path above collects
                ps.add(parents[1].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps

def parentpost(repo, subset, x, order):
    return p1(repo, subset, x)

@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            try:
                up(parentrevs(r))
            except error.WdirUnsupported:
                up(p.rev() for p in repo[r].parents())
    ps -= {node.nullrev}
    return subset & ps

def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    return repo._phasecache.getrevset(repo, targets, subset)

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

@predicate('stack([revs])', safe=True)
def stack(repo, subset, x):
    """Experimental revset for the stack of changesets or working directory
    parent. (EXPERIMENTAL)
    """
    if x is None:
        stacks = stackmod.getstack(repo, x)
    else:
        stacks = smartset.baseset([])
        for revision in getset(repo, fullreposet(repo), x):
            currentstack = stackmod.getstack(repo, revision)
            stacks = stacks + currentstack

    return subset & stacks

def parentspec(repo, subset, x, n, order):
    """``set^0``
      The set.
    ``set^1`` (or ``set^``), ``set^2``
      First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                ps.add(repo[r].parents()[0].rev())
        else:
            try:
                parents = cl.parentrevs(r)
                if parents[1] != node.nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps
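
# parentspec implements the revset ^ suffix: 'X^0' is X itself, 'X^' or 'X^1'
# is the first parent, and 'X^2' is the second parent (empty when X is not a
# merge). For example, "hg log -r 'tip^2'" shows tip's second parent, if any.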

@predicate('present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    return _phase(repo, subset, phases.draft, phases.secret)

# for internal use
@predicate('_phaseandancestors(phasename, set)', safe=True)
def _phaseandancestors(repo, subset, x):
    # equivalent to (phasename() & ancestors(set)) but more efficient
    # phasename could be one of 'draft', 'secret', or '_notpublic'
    args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
    phasename = getsymbol(args[0])
    s = getset(repo, fullreposet(repo), args[1])

    draft = phases.draft
    secret = phases.secret
    phasenamemap = {
        '_notpublic': draft,
        'draft': draft, # follow secret's ancestors
        'secret': secret,
    }
    if phasename not in phasenamemap:
        raise error.ParseError('%r is not a valid phasename' % phasename)

    minimalphase = phasenamemap[phasename]
    getphase = repo._phasecache.phase

    def cutfunc(rev):
        return getphase(repo, rev) < minimalphase

    revs = dagop.revancestors(repo, s, cutfunc=cutfunc)

    if phasename == 'draft': # need to remove secret changesets
        revs = revs.filter(lambda r: getphase(repo, r) == draft)
    return subset & revs

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    return _phase(repo, subset, phases.public)

@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True, weight=30)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field, pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(
            opts=diffutil.diffallopts(repo.ui, {'git': True}))),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
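
# The cost-ordering trick above in isolation (hypothetical field names; an
# unknown field sorts last, i.e. is assumed to be the most expensive):
#
#   >>> fieldorder = ['phase', 'user', 'date', 'diff']
#   >>> def cost(f):
#   ...     try:
#   ...         return fieldorder.index(f)
#   ...     except ValueError:
#   ...         return len(fieldorder)
#   >>> sorted(['diff', 'user', 'mystery'], key=cost)
#   ['user', 'diff', 'mystery']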

@predicate('reverse(set)', safe=True, takeorder=True, weight=0)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    l = getset(repo, subset, x, order)
    if order == defineorder:
        l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')
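
# A sketch of the root test above on a hypothetical linear graph (tuples are
# (p1, p2); rev 1's parent lies outside the set, so rev 1 is its root):
#
#   >>> parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1)}
#   >>> s = {1, 2}
#   >>> [r for r in sorted(s)
#   ...  if not any(0 <= p and p in s for p in parents[r])]
#   [1]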

_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}

def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k.startswith('-'))
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(
                _("unknown sort key %r") % pycompat.bytestr(fk))
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts
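
# Key parsing in isolation: a leading '-' flips the sort direction and is
# stripped from the key name (a minimal sketch of the loop above):
#
#   >>> k = '-date'
#   >>> reverse = k.startswith('-')
#   >>> (k[1:] if reverse else k, reverse)
#   ('date', True)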

@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
           weight=10)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending; specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topological sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topological branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s, order)

    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
                                      firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1948
1948
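# A usage sketch for the sort() predicate above (illustrative CLI
# invocations, assuming a local repository; not taken from this changeset):
#
#     hg log -r 'sort(all(), -date)'            # newest commits first
#     hg log -r 'sort(all(), "branch -date")'   # by branch, then newest first
#     hg log -r 'sort(all(), topo, topo.firstbranch=tip)'  # tip's branch first
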
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = stringutil.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

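# A usage sketch for subrepo() (the path pattern is made up):
#
#     hg log -r 'subrepo()'                # changesets touching any subrepo
#     hg log -r 'subrepo("re:^vendor/")'   # only subrepo paths under vendor/
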
def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    cl = repo.unfiltered().changelog
    torev = cl.rev
    tonode = cl.node
    nodemap = cl.nodemap
    result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
    return smartset.baseset(result - repo.changelog.filteredrevs)

@predicate('successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given set themselves"""
    s = getset(repo, fullreposet(repo), x)
    f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
    d = _mapbynodefunc(repo, s, f)
    return subset & d

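# A usage sketch for successors() (illustrative; only meaningful when
# obsolescence markers exist in the repository):
#
#     hg log -r 'successors(obsolete())'   # where rewritten changesets went
#     hg log -r 'successors(.) - .'        # strict successors of the parent
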
def _substringmatcher(pattern, casesensitive=True):
    kind, pattern, matcher = stringutil.stringmatcher(
        pattern, casesensitive=casesensitive)
    if kind == 'literal':
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher

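# A behavior sketch for _substringmatcher (inferred from the code above;
# the example strings are made up):
#
#     kind, pat, m = _substringmatcher('alice')
#     m('alice@example.com')        # True: literal kind, substring test
#     kind, pat, m = _substringmatcher('ALICE', casesensitive=False)
#     m('alice@example.com')        # True: both sides lowercased first
#     kind, pat, m = _substringmatcher(r're:@example\.com$')
#     m('bob@example.com')          # truthy: regexp match via stringmatcher
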
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = stringutil.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = {repo[tn].rev()}
        else:
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
    return subset & s

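# A usage sketch for tag() (tag names are illustrative):
#
#     hg log -r 'tag()'             # every tagged revision except 'tip'
#     hg log -r 'tag("1.9")'        # a literal tag, resolved via the cache
#     hg log -r 'tag("re:^v\d")'    # tags matched by a regular expression
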
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
    """
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _("orphan takes no arguments"))
    orphan = obsmod.getrevs(repo, 'orphan')
    return subset & orphan


@predicate('user(string)', safe=True, weight=10)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)

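# A usage sketch for user() (addresses are made up):
#
#     hg log -r 'user(alice)'                  # case-insensitive substring
#     hg log -r 'user("re:@example\.org$")'    # regular-expression match
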
@predicate('wdir()', safe=True, weight=0)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if ('%d' % r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t, defineorder)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)

def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_intlist', safe=True, takeorder=True, weight=0)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)

def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)

methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "andsmally": andsmallyset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "relation": relationset,
    "relsubscript": relsubscriptset,
    "subscript": subscriptset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}

def lookupfn(repo):
    return lambda symbol: scmutil.isrevsymbol(repo, symbol)

def match(ui, spec, lookup=None):
    """Create a matcher for a single revision spec"""
    return matchany(ui, [spec], lookup=lookup)

def matchany(ui, specs, lookup=None, localalias=None):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If lookup function is not None, the parser will first attempt to handle
    old-style ranges, which may contain operator characters.

    If localalias is not None, it is a dict {name: definitionstring}. It takes
    precedence over [revsetalias] config section.
    """
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        tree = ('or',
                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))

    aliases = []
    warn = None
    if ui:
        aliases.extend(ui.configitems('revsetalias'))
        warn = ui.warn
    if localalias:
        aliases.extend(localalias.items())
    if aliases:
        tree = revsetlang.expandaliases(tree, aliases, warn=warn)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    return makematcher(tree)

def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None, order=None):
        if order is None:
            if subset is None:
                order = defineorder  # 'x'
            else:
                order = followorder  # 'subset & x'
        if subset is None:
            subset = fullreposet(repo)
        return getset(repo, subset, tree, order)
    return mfunc

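# A minimal sketch of driving match()/makematcher() from Python (assumed
# setup; hg, ui and revset are the standard mercurial modules):
#
#     from mercurial import hg, revset, ui as uimod
#
#     ui = uimod.ui.load()
#     repo = hg.repository(ui, '.')
#     mfunc = revset.match(ui, 'heads(all())', lookup=revset.lookupfn(repo))
#     for rev in mfunc(repo):   # no subset given, so full-repo define order
#         ui.write(b'%d\n' % rev)
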
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
@@ -1,816 +1,816 @@
# templatekw.py - common changeset template keywords
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    hex,
    nullid,
)

from . import (
    encoding,
    error,
    hbisect,
    i18n,
    obsutil,
    patch,
    pycompat,
    registrar,
    scmutil,
    templateutil,
    util,
)
from .utils import (
    diffutil,
    stringutil,
)

_hybrid = templateutil.hybrid
hybriddict = templateutil.hybriddict
hybridlist = templateutil.hybridlist
compatdict = templateutil.compatdict
compatlist = templateutil.compatlist
_showcompatlist = templateutil._showcompatlist

def getlatesttags(context, mapping, pattern=None):
    '''return date, distance and name for the latest tag of rev'''
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    cache = context.resource(mapping, 'cache')

    cachename = 'latesttags'
    if pattern is not None:
        cachename += '-' + pattern
        match = stringutil.stringmatcher(pattern)[2]
    else:
        match = util.always

    if cachename not in cache:
        # Cache mapping from rev to a tuple with tag date, tag
        # distance and tag name
        cache[cachename] = {-1: (0, 0, ['null'])}
    latesttags = cache[cachename]

    rev = ctx.rev()
    todo = [rev]
    while todo:
        rev = todo.pop()
        if rev in latesttags:
            continue
        ctx = repo[rev]
        tags = [t for t in ctx.tags()
                if (repo.tagtype(t) and repo.tagtype(t) != 'local'
                    and match(t))]
        if tags:
            latesttags[rev] = ctx.date()[0], 0, [t for t in sorted(tags)]
            continue
        try:
            ptags = [latesttags[p.rev()] for p in ctx.parents()]
            if len(ptags) > 1:
                if ptags[0][2] == ptags[1][2]:
                    # The tuples are laid out so the right one can be found by
                    # comparison in this case.
                    pdate, pdist, ptag = max(ptags)
                else:
                    def key(x):
                        changessincetag = len(repo.revs('only(%d, %s)',
                                                        ctx.rev(), x[2][0]))
                        # Smallest number of changes since tag wins. Date is
                        # used as tiebreaker.
                        return [-changessincetag, x[0]]
                    pdate, pdist, ptag = max(ptags, key=key)
            else:
                pdate, pdist, ptag = ptags[0]
        except KeyError:
            # Cache miss - recurse
            todo.append(rev)
            todo.extend(p.rev() for p in ctx.parents())
            continue
        latesttags[rev] = pdate, pdist + 1, ptag
    return latesttags[rev]

def getrenamedfn(repo, endrev=None):
    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            renamed = repo[rev][fn].renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed

def getlogcolumns():
    """Return a dict of log column labels"""
    _ = pycompat.identity  # temporarily disable gettext
    # i18n: column positioning for "hg log"
    columns = _('bookmark:    %s\n'
                'branch:      %s\n'
                'changeset:   %s\n'
                'copies:      %s\n'
                'date:        %s\n'
                'extra:       %s=%s\n'
                'files+:      %s\n'
                'files-:      %s\n'
                'files:       %s\n'
                'instability: %s\n'
                'manifest:    %s\n'
                'obsolete:    %s\n'
                'parent:      %s\n'
                'phase:       %s\n'
                'summary:     %s\n'
                'tag:         %s\n'
                'user:        %s\n')
    return dict(zip([s.split(':', 1)[0] for s in columns.splitlines()],
                    i18n._(columns).splitlines(True)))

# default templates internally used for rendering of lists
defaulttempl = {
    'parent': '{rev}:{node|formatnode} ',
    'manifest': '{rev}:{node|formatnode}',
    'file_copy': '{name} ({source})',
    'envvar': '{key}={value}',
    'extra': '{key}={value|stringescape}'
}
# filecopy is preserved for compatibility reasons
defaulttempl['filecopy'] = defaulttempl['file_copy']

# keywords are callables (see registrar.templatekeyword for details)
keywords = {}
templatekeyword = registrar.templatekeyword(keywords)

@templatekeyword('author', requires={'ctx'})
def showauthor(context, mapping):
    """String. The unmodified author of the changeset."""
    ctx = context.resource(mapping, 'ctx')
    return ctx.user()

@templatekeyword('bisect', requires={'repo', 'ctx'})
def showbisect(context, mapping):
    """String. The changeset bisection status."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    return hbisect.label(repo, ctx.node())

@templatekeyword('branch', requires={'ctx'})
def showbranch(context, mapping):
    """String. The name of the branch on which the changeset was
    committed.
    """
    ctx = context.resource(mapping, 'ctx')
    return ctx.branch()

@templatekeyword('branches', requires={'ctx'})
def showbranches(context, mapping):
    """List of strings. The name of the branch on which the
    changeset was committed. Will be empty if the branch name was
    default. (DEPRECATED)
    """
    ctx = context.resource(mapping, 'ctx')
    branch = ctx.branch()
    if branch != 'default':
        return compatlist(context, mapping, 'branch', [branch],
                          plural='branches')
    return compatlist(context, mapping, 'branch', [], plural='branches')

@templatekeyword('bookmarks', requires={'repo', 'ctx'})
def showbookmarks(context, mapping):
    """List of strings. Any bookmarks associated with the
    changeset. Also sets 'active', the name of the active bookmark.
    """
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    bookmarks = ctx.bookmarks()
    active = repo._activebookmark
    makemap = lambda v: {'bookmark': v, 'active': active, 'current': active}
    f = _showcompatlist(context, mapping, 'bookmark', bookmarks)
    return _hybrid(f, bookmarks, makemap, pycompat.identity)

@templatekeyword('children', requires={'ctx'})
def showchildren(context, mapping):
    """List of strings. The children of the changeset."""
    ctx = context.resource(mapping, 'ctx')
    childrevs = ['%d:%s' % (cctx.rev(), cctx) for cctx in ctx.children()]
    return compatlist(context, mapping, 'children', childrevs, element='child')

# Deprecated, but kept alive for help generation purposes.
@templatekeyword('currentbookmark', requires={'repo', 'ctx'})
def showcurrentbookmark(context, mapping):
    """String. The active bookmark, if it is associated with the changeset.
    (DEPRECATED)"""
    return showactivebookmark(context, mapping)

@templatekeyword('activebookmark', requires={'repo', 'ctx'})
def showactivebookmark(context, mapping):
    """String. The active bookmark, if it is associated with the changeset."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    active = repo._activebookmark
    if active and active in ctx.bookmarks():
        return active
    return ''

@templatekeyword('date', requires={'ctx'})
def showdate(context, mapping):
    """Date information. The date when the changeset was committed."""
    ctx = context.resource(mapping, 'ctx')
    # the default string format is '<float(unixtime)><tzoffset>' because
    # python-hglib splits date at decimal separator.
    return templateutil.date(ctx.date(), showfmt='%d.0%d')

@templatekeyword('desc', requires={'ctx'})
def showdescription(context, mapping):
    """String. The text of the changeset description."""
    ctx = context.resource(mapping, 'ctx')
    s = ctx.description()
    if isinstance(s, encoding.localstr):
        # try hard to preserve utf-8 bytes
        return encoding.tolocal(encoding.fromlocal(s).strip())
    elif isinstance(s, encoding.safelocalstr):
        return encoding.safelocalstr(s.strip())
    else:
        return s.strip()

@templatekeyword('diffstat', requires={'ui', 'ctx'})
def showdiffstat(context, mapping):
    """String. Statistics of changes with the following format:
    "modified files: +added/-removed lines"
    """
    ui = context.resource(mapping, 'ui')
    ctx = context.resource(mapping, 'ctx')
-    diffopts = diffutil.diffopts(ui, {'noprefix': False})
+    diffopts = diffutil.diffallopts(ui, {'noprefix': False})
    diff = ctx.diff(opts=diffopts)
    stats = patch.diffstatdata(util.iterlines(diff))
    maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
    return '%d: +%d/-%d' % (len(stats), adds, removes)

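# A rendering sketch for {diffstat}; the counts come from patch.diffstatsum
# above, and the sample output is illustrative:
#
#     hg log -r . -T '{diffstat}\n'   # e.g. "3: +12/-4" (3 files, +12, -4)
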
@templatekeyword('envvars', requires={'ui'})
def showenvvars(context, mapping):
    """A dictionary of environment variables. (EXPERIMENTAL)"""
    ui = context.resource(mapping, 'ui')
    env = ui.exportableenviron()
    env = util.sortdict((k, env[k]) for k in sorted(env))
    return compatdict(context, mapping, 'envvar', env, plural='envvars')

@templatekeyword('extras', requires={'ctx'})
def showextras(context, mapping):
    """List of dicts with key, value entries of the 'extras'
    field of this changeset."""
    ctx = context.resource(mapping, 'ctx')
    extras = ctx.extra()
    extras = util.sortdict((k, extras[k]) for k in sorted(extras))
    makemap = lambda k: {'key': k, 'value': extras[k]}
    c = [makemap(k) for k in extras]
    f = _showcompatlist(context, mapping, 'extra', c, plural='extras')
    return _hybrid(f, extras, makemap,
                   lambda k: '%s=%s' % (k, stringutil.escapestr(extras[k])))

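# Illustrative templates for {extras} (the 'branch' extra exists on every
# changeset; other keys vary by repository):
#
#     hg log -r . -T '{extras % "{key}={value}\n"}'
#     hg log -r . -T '{get(extras, "branch")}\n'
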
def _showfilesbystat(context, mapping, name, index):
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    revcache = context.resource(mapping, 'revcache')
    if 'files' not in revcache:
        revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
    files = revcache['files'][index]
    return compatlist(context, mapping, name, files, element='file')

@templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache'})
def showfileadds(context, mapping):
    """List of strings. Files added by this changeset."""
    return _showfilesbystat(context, mapping, 'file_add', 1)

@templatekeyword('file_copies',
                 requires={'repo', 'ctx', 'cache', 'revcache'})
def showfilecopies(context, mapping):
    """List of strings. Files copied in this changeset with
    their sources.
    """
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    cache = context.resource(mapping, 'cache')
    copies = context.resource(mapping, 'revcache').get('copies')
    if copies is None:
        if 'getrenamed' not in cache:
            cache['getrenamed'] = getrenamedfn(repo)
        copies = []
        getrenamed = cache['getrenamed']
        for fn in ctx.files():
            rename = getrenamed(fn, ctx.rev())
            if rename:
                copies.append((fn, rename))

    copies = util.sortdict(copies)
    return compatdict(context, mapping, 'file_copy', copies,
                      key='name', value='source', fmt='%s (%s)',
                      plural='file_copies')

# showfilecopiesswitch() displays file copies only if copy records are
# provided before calling the templater, usually with a --copies
# command line switch.
@templatekeyword('file_copies_switch', requires={'revcache'})
def showfilecopiesswitch(context, mapping):
    """List of strings. Like "file_copies" but displayed
    only if the --copied switch is set.
    """
    copies = context.resource(mapping, 'revcache').get('copies') or []
    copies = util.sortdict(copies)
    return compatdict(context, mapping, 'file_copy', copies,
                      key='name', value='source', fmt='%s (%s)',
                      plural='file_copies')

@templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache'})
def showfiledels(context, mapping):
    """List of strings. Files removed by this changeset."""
    return _showfilesbystat(context, mapping, 'file_del', 2)

@templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache'})
def showfilemods(context, mapping):
    """List of strings. Files modified by this changeset."""
    return _showfilesbystat(context, mapping, 'file_mod', 0)

@templatekeyword('files', requires={'ctx'})
def showfiles(context, mapping):
    """List of strings. All files modified, added, or removed by this
    changeset.
    """
    ctx = context.resource(mapping, 'ctx')
    return compatlist(context, mapping, 'file', ctx.files())

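# Illustrative templates for the file-list keywords above:
#
#     hg log -r . -T '{files % "{file}\n"}'     # one touched file per line
#     hg log -r . -T 'A:{file_adds} M:{file_mods} R:{file_dels}\n'
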
@templatekeyword('graphnode', requires={'repo', 'ctx'})
def showgraphnode(context, mapping):
    """String. The character representing the changeset node in an ASCII
    revision graph."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    return getgraphnode(repo, ctx)

def getgraphnode(repo, ctx):
    return getgraphnodecurrent(repo, ctx) or getgraphnodesymbol(ctx)

def getgraphnodecurrent(repo, ctx):
    wpnodes = repo.dirstate.parents()
    if wpnodes[1] == nullid:
        wpnodes = wpnodes[:1]
    if ctx.node() in wpnodes:
        return '@'
    else:
        return ''

def getgraphnodesymbol(ctx):
    if ctx.obsolete():
        return 'x'
    elif ctx.isunstable():
        return '*'
    elif ctx.closesbranch():
        return '_'
    else:
        return 'o'

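# The functions above give 'hg log --graph' its node glyphs: '@' for a
# working-directory parent, 'x' obsolete, '*' unstable, '_' branch-closing,
# and 'o' otherwise. An illustrative invocation:
#
#     hg log -G -T '{rev} {desc|firstline}\n'
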
@templatekeyword('graphwidth', requires=())
def showgraphwidth(context, mapping):
    """Integer. The width of the graph drawn by 'log --graph' or zero."""
    # just hosts documentation; should be overridden by template mapping
    return 0

@templatekeyword('index', requires=())
def showindex(context, mapping):
    """Integer. The current iteration of the loop. (0 indexed)"""
    # just hosts documentation; should be overridden by template mapping
    raise error.Abort(_("can't use index in this context"))

@templatekeyword('latesttag', requires={'repo', 'ctx', 'cache'})
def showlatesttag(context, mapping):
    """List of strings. The global tags on the most recent globally
    tagged ancestor of this changeset. If no such tags exist, the list
    consists of the single string "null".
    """
    return showlatesttags(context, mapping, None)

def showlatesttags(context, mapping, pattern):
    """helper method for the latesttag keyword and function"""
    latesttags = getlatesttags(context, mapping, pattern)

    # latesttag[0] is an implementation detail for sorting csets on different
    # branches in a stable manner; it is the date the tagged cset was created,
    # not the date the tag was created. Therefore it isn't made visible here.
    makemap = lambda v: {
        'changes': _showchangessincetag,
        'distance': latesttags[1],
        'latesttag': v,  # BC with {latesttag % '{latesttag}'}
        'tag': v
    }

    tags = latesttags[2]
    f = _showcompatlist(context, mapping, 'latesttag', tags, separator=':')
    return _hybrid(f, tags, makemap, pycompat.identity)

@templatekeyword('latesttagdistance', requires={'repo', 'ctx', 'cache'})
def showlatesttagdistance(context, mapping):
    """Integer. Longest path to the latest tag."""
    return getlatesttags(context, mapping)[1]

@templatekeyword('changessincelatesttag', requires={'repo', 'ctx', 'cache'})
def showchangessincelatesttag(context, mapping):
    """Integer. All ancestors not in the latest tag."""
    tag = getlatesttags(context, mapping)[2][0]
    mapping = context.overlaymap(mapping, {'tag': tag})
    return _showchangessincetag(context, mapping)

def _showchangessincetag(context, mapping):
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    offset = 0
    revs = [ctx.rev()]
    tag = context.symbol(mapping, 'tag')

    # The only() revset doesn't currently support wdir()
    if ctx.rev() is None:
        offset = 1
        revs = [p.rev() for p in ctx.parents()]

    return len(repo.revs('only(%ld, %s)', revs, tag)) + offset

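# These keywords are often combined into a version-like string; an
# illustrative template (the sample output shape is an assumption):
#
#     hg log -r . -T '{latesttag}+{changessincelatesttag}.{node|short}\n'
#     # e.g. "4.6+23.a1b2c3d4e5f6" for 23 changes since tag 4.6
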
460 # teach templater latesttags.changes is switched to (context, mapping) API
460 # teach templater latesttags.changes is switched to (context, mapping) API
461 _showchangessincetag._requires = {'repo', 'ctx'}
461 _showchangessincetag._requires = {'repo', 'ctx'}
462
462
463 @templatekeyword('manifest', requires={'repo', 'ctx'})
463 @templatekeyword('manifest', requires={'repo', 'ctx'})
464 def showmanifest(context, mapping):
464 def showmanifest(context, mapping):
465 repo = context.resource(mapping, 'repo')
465 repo = context.resource(mapping, 'repo')
466 ctx = context.resource(mapping, 'ctx')
466 ctx = context.resource(mapping, 'ctx')
467 mnode = ctx.manifestnode()
467 mnode = ctx.manifestnode()
468 if mnode is None:
468 if mnode is None:
469 # just avoid crash, we might want to use the 'ff...' hash in future
469 # just avoid crash, we might want to use the 'ff...' hash in future
470 return
470 return
471 mrev = repo.manifestlog.rev(mnode)
471 mrev = repo.manifestlog.rev(mnode)
472 mhex = hex(mnode)
472 mhex = hex(mnode)
473 mapping = context.overlaymap(mapping, {'rev': mrev, 'node': mhex})
473 mapping = context.overlaymap(mapping, {'rev': mrev, 'node': mhex})
474 f = context.process('manifest', mapping)
474 f = context.process('manifest', mapping)
475 # TODO: perhaps 'ctx' should be dropped from mapping because manifest
475 # TODO: perhaps 'ctx' should be dropped from mapping because manifest
476 # rev and node are completely different from changeset's.
476 # rev and node are completely different from changeset's.
477 return templateutil.hybriditem(f, None, f,
477 return templateutil.hybriditem(f, None, f,
478 lambda x: {'rev': mrev, 'node': mhex})
478 lambda x: {'rev': mrev, 'node': mhex})
479
479
480 @templatekeyword('obsfate', requires={'ui', 'repo', 'ctx'})
480 @templatekeyword('obsfate', requires={'ui', 'repo', 'ctx'})
481 def showobsfate(context, mapping):
481 def showobsfate(context, mapping):
482 # this function returns a list containing pre-formatted obsfate strings.
482 # this function returns a list containing pre-formatted obsfate strings.
483 #
483 #
484 # This function will be replaced by templates fragments when we will have
484 # This function will be replaced by templates fragments when we will have
485 # the verbosity templatekw available.
485 # the verbosity templatekw available.
486 succsandmarkers = showsuccsandmarkers(context, mapping)
486 succsandmarkers = showsuccsandmarkers(context, mapping)
487
487
488 ui = context.resource(mapping, 'ui')
488 ui = context.resource(mapping, 'ui')
489 repo = context.resource(mapping, 'repo')
489 repo = context.resource(mapping, 'repo')
490 values = []
490 values = []
491
491
492 for x in succsandmarkers.tovalue(context, mapping):
492 for x in succsandmarkers.tovalue(context, mapping):
493 v = obsutil.obsfateprinter(ui, repo, x['successors'], x['markers'],
493 v = obsutil.obsfateprinter(ui, repo, x['successors'], x['markers'],
494 scmutil.formatchangeid)
494 scmutil.formatchangeid)
495 values.append(v)
495 values.append(v)
496
496
497 return compatlist(context, mapping, "fate", values)
497 return compatlist(context, mapping, "fate", values)
498
498
499 def shownames(context, mapping, namespace):
499 def shownames(context, mapping, namespace):
500 """helper method to generate a template keyword for a namespace"""
500 """helper method to generate a template keyword for a namespace"""
501 repo = context.resource(mapping, 'repo')
501 repo = context.resource(mapping, 'repo')
502 ctx = context.resource(mapping, 'ctx')
502 ctx = context.resource(mapping, 'ctx')
503 ns = repo.names[namespace]
503 ns = repo.names[namespace]
504 names = ns.names(repo, ctx.node())
504 names = ns.names(repo, ctx.node())
505 return compatlist(context, mapping, ns.templatename, names,
505 return compatlist(context, mapping, ns.templatename, names,
506 plural=namespace)
506 plural=namespace)
507
507
508 @templatekeyword('namespaces', requires={'repo', 'ctx'})
508 @templatekeyword('namespaces', requires={'repo', 'ctx'})
509 def shownamespaces(context, mapping):
509 def shownamespaces(context, mapping):
510 """Dict of lists. Names attached to this changeset per
510 """Dict of lists. Names attached to this changeset per
511 namespace."""
511 namespace."""
512 repo = context.resource(mapping, 'repo')
512 repo = context.resource(mapping, 'repo')
513 ctx = context.resource(mapping, 'ctx')
513 ctx = context.resource(mapping, 'ctx')
514
514
515 namespaces = util.sortdict()
515 namespaces = util.sortdict()
516 def makensmapfn(ns):
516 def makensmapfn(ns):
517 # 'name' for iterating over namespaces, templatename for local reference
517 # 'name' for iterating over namespaces, templatename for local reference
518 return lambda v: {'name': v, ns.templatename: v}
518 return lambda v: {'name': v, ns.templatename: v}
519
519
520 for k, ns in repo.names.iteritems():
520 for k, ns in repo.names.iteritems():
521 names = ns.names(repo, ctx.node())
521 names = ns.names(repo, ctx.node())
522 f = _showcompatlist(context, mapping, 'name', names)
522 f = _showcompatlist(context, mapping, 'name', names)
523 namespaces[k] = _hybrid(f, names, makensmapfn(ns), pycompat.identity)
523 namespaces[k] = _hybrid(f, names, makensmapfn(ns), pycompat.identity)
524
524
525 f = _showcompatlist(context, mapping, 'namespace', list(namespaces))
525 f = _showcompatlist(context, mapping, 'namespace', list(namespaces))
526
526
527 def makemap(ns):
527 def makemap(ns):
528 return {
528 return {
529 'namespace': ns,
529 'namespace': ns,
530 'names': namespaces[ns],
530 'names': namespaces[ns],
531 'builtin': repo.names[ns].builtin,
531 'builtin': repo.names[ns].builtin,
532 'colorname': repo.names[ns].colorname,
532 'colorname': repo.names[ns].colorname,
533 }
533 }
534
534
535 return _hybrid(f, namespaces, makemap, pycompat.identity)
535 return _hybrid(f, namespaces, makemap, pycompat.identity)
536
536
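
For comparison, a sketch of the data this keyword walks (editor's illustration; the iteration mirrors the loop above and assumes a repository in the current directory):

from mercurial import hg, ui as uimod

u = uimod.ui.load()
repo = hg.repository(u, b'.')   # assumption: cwd is a Mercurial repository
for k, ns in repo.names.iteritems():
    # same call the keyword makes: names attached to a node, per namespace
    names = ns.names(repo, repo[b'tip'].node())
    print('%s: %s' % (k, b', '.join(names)))
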
537 @templatekeyword('node', requires={'ctx'})
537 @templatekeyword('node', requires={'ctx'})
538 def shownode(context, mapping):
538 def shownode(context, mapping):
539 """String. The changeset identification hash, as a 40 hexadecimal
539 """String. The changeset identification hash, as a 40 hexadecimal
540 digit string.
540 digit string.
541 """
541 """
542 ctx = context.resource(mapping, 'ctx')
542 ctx = context.resource(mapping, 'ctx')
543 return ctx.hex()
543 return ctx.hex()
544
544
545 @templatekeyword('obsolete', requires={'ctx'})
545 @templatekeyword('obsolete', requires={'ctx'})
546 def showobsolete(context, mapping):
546 def showobsolete(context, mapping):
547 """String. Whether the changeset is obsolete. (EXPERIMENTAL)"""
547 """String. Whether the changeset is obsolete. (EXPERIMENTAL)"""
548 ctx = context.resource(mapping, 'ctx')
548 ctx = context.resource(mapping, 'ctx')
549 if ctx.obsolete():
549 if ctx.obsolete():
550 return 'obsolete'
550 return 'obsolete'
551 return ''
551 return ''
552
552
553 @templatekeyword('peerurls', requires={'repo'})
553 @templatekeyword('peerurls', requires={'repo'})
554 def showpeerurls(context, mapping):
554 def showpeerurls(context, mapping):
555 """A dictionary of repository locations defined in the [paths] section
555 """A dictionary of repository locations defined in the [paths] section
556 of your configuration file."""
556 of your configuration file."""
557 repo = context.resource(mapping, 'repo')
557 repo = context.resource(mapping, 'repo')
558 # see commands.paths() for naming of dictionary keys
558 # see commands.paths() for naming of dictionary keys
559 paths = repo.ui.paths
559 paths = repo.ui.paths
560 urls = util.sortdict((k, p.rawloc) for k, p in sorted(paths.iteritems()))
560 urls = util.sortdict((k, p.rawloc) for k, p in sorted(paths.iteritems()))
561 def makemap(k):
561 def makemap(k):
562 p = paths[k]
562 p = paths[k]
563 d = {'name': k, 'url': p.rawloc}
563 d = {'name': k, 'url': p.rawloc}
564 d.update((o, v) for o, v in sorted(p.suboptions.iteritems()))
564 d.update((o, v) for o, v in sorted(p.suboptions.iteritems()))
565 return d
565 return d
566 return _hybrid(None, urls, makemap, lambda k: '%s=%s' % (k, urls[k]))
566 return _hybrid(None, urls, makemap, lambda k: '%s=%s' % (k, urls[k]))
567
567
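
A sketch of the same information outside the templater (editor's illustration): the keyword's makemap() exposes each path's raw location plus its sub-options, which can be read directly from ui.paths:

from mercurial import hg, ui as uimod

u = uimod.ui.load()
repo = hg.repository(u, b'.')   # assumption: cwd is a Mercurial repository
for name, path in sorted(repo.ui.paths.iteritems()):
    print('%s = %s' % (name, path.rawloc))
    for opt, value in sorted(path.suboptions.iteritems()):
        print('  %s:%s = %s' % (name, opt, value))
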
568 @templatekeyword("predecessors", requires={'repo', 'ctx'})
568 @templatekeyword("predecessors", requires={'repo', 'ctx'})
569 def showpredecessors(context, mapping):
569 def showpredecessors(context, mapping):
570 """Returns the list if the closest visible successors. (EXPERIMENTAL)"""
570 """Returns the list if the closest visible successors. (EXPERIMENTAL)"""
571 repo = context.resource(mapping, 'repo')
571 repo = context.resource(mapping, 'repo')
572 ctx = context.resource(mapping, 'ctx')
572 ctx = context.resource(mapping, 'ctx')
573 predecessors = sorted(obsutil.closestpredecessors(repo, ctx.node()))
573 predecessors = sorted(obsutil.closestpredecessors(repo, ctx.node()))
574 predecessors = pycompat.maplist(hex, predecessors)
574 predecessors = pycompat.maplist(hex, predecessors)
575
575
576 return _hybrid(None, predecessors,
576 return _hybrid(None, predecessors,
577 lambda x: {'ctx': repo[x]},
577 lambda x: {'ctx': repo[x]},
578 lambda x: scmutil.formatchangeid(repo[x]))
578 lambda x: scmutil.formatchangeid(repo[x]))
579
579
580 @templatekeyword('reporoot', requires={'repo'})
580 @templatekeyword('reporoot', requires={'repo'})
581 def showreporoot(context, mapping):
581 def showreporoot(context, mapping):
582 """String. The root directory of the current repository."""
582 """String. The root directory of the current repository."""
583 repo = context.resource(mapping, 'repo')
583 repo = context.resource(mapping, 'repo')
584 return repo.root
584 return repo.root
585
585
586 @templatekeyword("successorssets", requires={'repo', 'ctx'})
586 @templatekeyword("successorssets", requires={'repo', 'ctx'})
587 def showsuccessorssets(context, mapping):
587 def showsuccessorssets(context, mapping):
588 """Returns a string of sets of successors for a changectx. Format used
588 """Returns a string of sets of successors for a changectx. Format used
589 is: [ctx1, ctx2], [ctx3] if ctx has been split into ctx1 and ctx2
589 is: [ctx1, ctx2], [ctx3] if ctx has been split into ctx1 and ctx2
590 while also diverged into ctx3. (EXPERIMENTAL)"""
590 while also diverged into ctx3. (EXPERIMENTAL)"""
591 repo = context.resource(mapping, 'repo')
591 repo = context.resource(mapping, 'repo')
592 ctx = context.resource(mapping, 'ctx')
592 ctx = context.resource(mapping, 'ctx')
593 if not ctx.obsolete():
593 if not ctx.obsolete():
594 return ''
594 return ''
595
595
596 ssets = obsutil.successorssets(repo, ctx.node(), closest=True)
596 ssets = obsutil.successorssets(repo, ctx.node(), closest=True)
597 ssets = [[hex(n) for n in ss] for ss in ssets]
597 ssets = [[hex(n) for n in ss] for ss in ssets]
598
598
599 data = []
599 data = []
600 for ss in ssets:
600 for ss in ssets:
601 h = _hybrid(None, ss, lambda x: {'ctx': repo[x]},
601 h = _hybrid(None, ss, lambda x: {'ctx': repo[x]},
602 lambda x: scmutil.formatchangeid(repo[x]))
602 lambda x: scmutil.formatchangeid(repo[x]))
603 data.append(h)
603 data.append(h)
604
604
605 # Format the successorssets
605 # Format the successorssets
606 def render(d):
606 def render(d):
607 return templateutil.stringify(context, mapping, d)
607 return templateutil.stringify(context, mapping, d)
608
608
609 def gen(data):
609 def gen(data):
610 yield "; ".join(render(d) for d in data)
610 yield "; ".join(render(d) for d in data)
611
611
612 return _hybrid(gen(data), data, lambda x: {'successorset': x},
612 return _hybrid(gen(data), data, lambda x: {'successorset': x},
613 pycompat.identity)
613 pycompat.identity)
614
614
615 @templatekeyword("succsandmarkers", requires={'repo', 'ctx'})
615 @templatekeyword("succsandmarkers", requires={'repo', 'ctx'})
616 def showsuccsandmarkers(context, mapping):
616 def showsuccsandmarkers(context, mapping):
617 """Returns a list of dict for each final successor of ctx. The dict
617 """Returns a list of dict for each final successor of ctx. The dict
618 contains successors node id in "successors" keys and the list of
618 contains successors node id in "successors" keys and the list of
619 obs-markers from ctx to the set of successors in "markers".
619 obs-markers from ctx to the set of successors in "markers".
620 (EXPERIMENTAL)
620 (EXPERIMENTAL)
621 """
621 """
622 repo = context.resource(mapping, 'repo')
622 repo = context.resource(mapping, 'repo')
623 ctx = context.resource(mapping, 'ctx')
623 ctx = context.resource(mapping, 'ctx')
624
624
625 values = obsutil.successorsandmarkers(repo, ctx)
625 values = obsutil.successorsandmarkers(repo, ctx)
626
626
627 if values is None:
627 if values is None:
628 values = []
628 values = []
629
629
630 # Format successors and markers to avoid exposing binary to templates
630 # Format successors and markers to avoid exposing binary to templates
631 data = []
631 data = []
632 for i in values:
632 for i in values:
633 # Format successors
633 # Format successors
634 successors = i['successors']
634 successors = i['successors']
635
635
636 successors = [hex(n) for n in successors]
636 successors = [hex(n) for n in successors]
637 successors = _hybrid(None, successors,
637 successors = _hybrid(None, successors,
638 lambda x: {'ctx': repo[x]},
638 lambda x: {'ctx': repo[x]},
639 lambda x: scmutil.formatchangeid(repo[x]))
639 lambda x: scmutil.formatchangeid(repo[x]))
640
640
641 # Format markers
641 # Format markers
642 finalmarkers = []
642 finalmarkers = []
643 for m in i['markers']:
643 for m in i['markers']:
644 hexprec = hex(m[0])
644 hexprec = hex(m[0])
645 hexsucs = tuple(hex(n) for n in m[1])
645 hexsucs = tuple(hex(n) for n in m[1])
646 hexparents = None
646 hexparents = None
647 if m[5] is not None:
647 if m[5] is not None:
648 hexparents = tuple(hex(n) for n in m[5])
648 hexparents = tuple(hex(n) for n in m[5])
649 newmarker = (hexprec, hexsucs) + m[2:5] + (hexparents,) + m[6:]
649 newmarker = (hexprec, hexsucs) + m[2:5] + (hexparents,) + m[6:]
650 finalmarkers.append(newmarker)
650 finalmarkers.append(newmarker)
651
651
652 data.append({'successors': successors, 'markers': finalmarkers})
652 data.append({'successors': successors, 'markers': finalmarkers})
653
653
654 return templateutil.mappinglist(data)
654 return templateutil.mappinglist(data)
655
655
656 @templatekeyword('p1rev', requires={'ctx'})
656 @templatekeyword('p1rev', requires={'ctx'})
657 def showp1rev(context, mapping):
657 def showp1rev(context, mapping):
658 """Integer. The repository-local revision number of the changeset's
658 """Integer. The repository-local revision number of the changeset's
659 first parent, or -1 if the changeset has no parents."""
659 first parent, or -1 if the changeset has no parents."""
660 ctx = context.resource(mapping, 'ctx')
660 ctx = context.resource(mapping, 'ctx')
661 return ctx.p1().rev()
661 return ctx.p1().rev()
662
662
663 @templatekeyword('p2rev', requires={'ctx'})
663 @templatekeyword('p2rev', requires={'ctx'})
664 def showp2rev(context, mapping):
664 def showp2rev(context, mapping):
665 """Integer. The repository-local revision number of the changeset's
665 """Integer. The repository-local revision number of the changeset's
666 second parent, or -1 if the changeset has no second parent."""
666 second parent, or -1 if the changeset has no second parent."""
667 ctx = context.resource(mapping, 'ctx')
667 ctx = context.resource(mapping, 'ctx')
668 return ctx.p2().rev()
668 return ctx.p2().rev()
669
669
670 @templatekeyword('p1node', requires={'ctx'})
670 @templatekeyword('p1node', requires={'ctx'})
671 def showp1node(context, mapping):
671 def showp1node(context, mapping):
672 """String. The identification hash of the changeset's first parent,
672 """String. The identification hash of the changeset's first parent,
673 as a 40 digit hexadecimal string. If the changeset has no parents, all
673 as a 40 digit hexadecimal string. If the changeset has no parents, all
674 digits are 0."""
674 digits are 0."""
675 ctx = context.resource(mapping, 'ctx')
675 ctx = context.resource(mapping, 'ctx')
676 return ctx.p1().hex()
676 return ctx.p1().hex()
677
677
678 @templatekeyword('p2node', requires={'ctx'})
678 @templatekeyword('p2node', requires={'ctx'})
679 def showp2node(context, mapping):
679 def showp2node(context, mapping):
680 """String. The identification hash of the changeset's second
680 """String. The identification hash of the changeset's second
681 parent, as a 40 digit hexadecimal string. If the changeset has no second
681 parent, as a 40 digit hexadecimal string. If the changeset has no second
682 parent, all digits are 0."""
682 parent, all digits are 0."""
683 ctx = context.resource(mapping, 'ctx')
683 ctx = context.resource(mapping, 'ctx')
684 return ctx.p2().hex()
684 return ctx.p2().hex()
685
685
686 @templatekeyword('parents', requires={'repo', 'ctx'})
686 @templatekeyword('parents', requires={'repo', 'ctx'})
687 def showparents(context, mapping):
687 def showparents(context, mapping):
688 """List of strings. The parents of the changeset in "rev:node"
688 """List of strings. The parents of the changeset in "rev:node"
689 format. If the changeset has only one "natural" parent (the predecessor
689 format. If the changeset has only one "natural" parent (the predecessor
690 revision) nothing is shown."""
690 revision) nothing is shown."""
691 repo = context.resource(mapping, 'repo')
691 repo = context.resource(mapping, 'repo')
692 ctx = context.resource(mapping, 'ctx')
692 ctx = context.resource(mapping, 'ctx')
693 pctxs = scmutil.meaningfulparents(repo, ctx)
693 pctxs = scmutil.meaningfulparents(repo, ctx)
694 prevs = [p.rev() for p in pctxs]
694 prevs = [p.rev() for p in pctxs]
695 parents = [[('rev', p.rev()),
695 parents = [[('rev', p.rev()),
696 ('node', p.hex()),
696 ('node', p.hex()),
697 ('phase', p.phasestr())]
697 ('phase', p.phasestr())]
698 for p in pctxs]
698 for p in pctxs]
699 f = _showcompatlist(context, mapping, 'parent', parents)
699 f = _showcompatlist(context, mapping, 'parent', parents)
700 return _hybrid(f, prevs, lambda x: {'ctx': repo[x]},
700 return _hybrid(f, prevs, lambda x: {'ctx': repo[x]},
701 lambda x: scmutil.formatchangeid(repo[x]), keytype=int)
701 lambda x: scmutil.formatchangeid(repo[x]), keytype=int)
702
702
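
A sketch of the "natural parent" rule in the docstring (editor's illustration): scmutil.meaningfulparents() returns an empty list when a changeset's only parent is the immediately preceding revision, which is why linear history renders no parents line.

from mercurial import hg, scmutil, ui as uimod

u = uimod.ui.load()
repo = hg.repository(u, b'.')   # assumption: cwd is a Mercurial repository
ctx = repo[b'tip']
# [] for a linear changeset; real parent contexts for merges and for
# parents that are not the previous revision
print(scmutil.meaningfulparents(repo, ctx))
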
703 @templatekeyword('phase', requires={'ctx'})
703 @templatekeyword('phase', requires={'ctx'})
704 def showphase(context, mapping):
704 def showphase(context, mapping):
705 """String. The changeset phase name."""
705 """String. The changeset phase name."""
706 ctx = context.resource(mapping, 'ctx')
706 ctx = context.resource(mapping, 'ctx')
707 return ctx.phasestr()
707 return ctx.phasestr()
708
708
709 @templatekeyword('phaseidx', requires={'ctx'})
709 @templatekeyword('phaseidx', requires={'ctx'})
710 def showphaseidx(context, mapping):
710 def showphaseidx(context, mapping):
711 """Integer. The changeset phase index. (ADVANCED)"""
711 """Integer. The changeset phase index. (ADVANCED)"""
712 ctx = context.resource(mapping, 'ctx')
712 ctx = context.resource(mapping, 'ctx')
713 return ctx.phase()
713 return ctx.phase()
714
714
715 @templatekeyword('rev', requires={'ctx'})
715 @templatekeyword('rev', requires={'ctx'})
716 def showrev(context, mapping):
716 def showrev(context, mapping):
717 """Integer. The repository-local changeset revision number."""
717 """Integer. The repository-local changeset revision number."""
718 ctx = context.resource(mapping, 'ctx')
718 ctx = context.resource(mapping, 'ctx')
719 return scmutil.intrev(ctx)
719 return scmutil.intrev(ctx)
720
720
721 def showrevslist(context, mapping, name, revs):
721 def showrevslist(context, mapping, name, revs):
722 """helper to generate a list of revisions in which a mapped template will
722 """helper to generate a list of revisions in which a mapped template will
723 be evaluated"""
723 be evaluated"""
724 repo = context.resource(mapping, 'repo')
724 repo = context.resource(mapping, 'repo')
725 f = _showcompatlist(context, mapping, name, ['%d' % r for r in revs])
725 f = _showcompatlist(context, mapping, name, ['%d' % r for r in revs])
726 return _hybrid(f, revs,
726 return _hybrid(f, revs,
727 lambda x: {name: x, 'ctx': repo[x]},
727 lambda x: {name: x, 'ctx': repo[x]},
728 pycompat.identity, keytype=int)
728 pycompat.identity, keytype=int)
729
729
730 @templatekeyword('subrepos', requires={'ctx'})
730 @templatekeyword('subrepos', requires={'ctx'})
731 def showsubrepos(context, mapping):
731 def showsubrepos(context, mapping):
732 """List of strings. Updated subrepositories in the changeset."""
732 """List of strings. Updated subrepositories in the changeset."""
733 ctx = context.resource(mapping, 'ctx')
733 ctx = context.resource(mapping, 'ctx')
734 substate = ctx.substate
734 substate = ctx.substate
735 if not substate:
735 if not substate:
736 return compatlist(context, mapping, 'subrepo', [])
736 return compatlist(context, mapping, 'subrepo', [])
737 psubstate = ctx.parents()[0].substate or {}
737 psubstate = ctx.parents()[0].substate or {}
738 subrepos = []
738 subrepos = []
739 for sub in substate:
739 for sub in substate:
740 if sub not in psubstate or substate[sub] != psubstate[sub]:
740 if sub not in psubstate or substate[sub] != psubstate[sub]:
741 subrepos.append(sub) # modified or newly added in ctx
741 subrepos.append(sub) # modified or newly added in ctx
742 for sub in psubstate:
742 for sub in psubstate:
743 if sub not in substate:
743 if sub not in substate:
744 subrepos.append(sub) # removed in ctx
744 subrepos.append(sub) # removed in ctx
745 return compatlist(context, mapping, 'subrepo', sorted(subrepos))
745 return compatlist(context, mapping, 'subrepo', sorted(subrepos))
746
746
747 # don't remove "showtags" definition, even though namespaces will put
747 # don't remove "showtags" definition, even though namespaces will put
748 # a helper function for "tags" keyword into "keywords" map automatically,
748 # a helper function for "tags" keyword into "keywords" map automatically,
749 # because online help text is built without namespaces initialization
749 # because online help text is built without namespaces initialization
750 @templatekeyword('tags', requires={'repo', 'ctx'})
750 @templatekeyword('tags', requires={'repo', 'ctx'})
751 def showtags(context, mapping):
751 def showtags(context, mapping):
752 """List of strings. Any tags associated with the changeset."""
752 """List of strings. Any tags associated with the changeset."""
753 return shownames(context, mapping, 'tags')
753 return shownames(context, mapping, 'tags')
754
754
755 @templatekeyword('termwidth', requires={'ui'})
755 @templatekeyword('termwidth', requires={'ui'})
756 def showtermwidth(context, mapping):
756 def showtermwidth(context, mapping):
757 """Integer. The width of the current terminal."""
757 """Integer. The width of the current terminal."""
758 ui = context.resource(mapping, 'ui')
758 ui = context.resource(mapping, 'ui')
759 return ui.termwidth()
759 return ui.termwidth()
760
760
761 @templatekeyword('instabilities', requires={'ctx'})
761 @templatekeyword('instabilities', requires={'ctx'})
762 def showinstabilities(context, mapping):
762 def showinstabilities(context, mapping):
763 """List of strings. Evolution instabilities affecting the changeset.
763 """List of strings. Evolution instabilities affecting the changeset.
764 (EXPERIMENTAL)
764 (EXPERIMENTAL)
765 """
765 """
766 ctx = context.resource(mapping, 'ctx')
766 ctx = context.resource(mapping, 'ctx')
767 return compatlist(context, mapping, 'instability', ctx.instabilities(),
767 return compatlist(context, mapping, 'instability', ctx.instabilities(),
768 plural='instabilities')
768 plural='instabilities')
769
769
770 @templatekeyword('verbosity', requires={'ui'})
770 @templatekeyword('verbosity', requires={'ui'})
771 def showverbosity(context, mapping):
771 def showverbosity(context, mapping):
772 """String. The current output verbosity in 'debug', 'quiet', 'verbose',
772 """String. The current output verbosity in 'debug', 'quiet', 'verbose',
773 or ''."""
773 or ''."""
774 ui = context.resource(mapping, 'ui')
774 ui = context.resource(mapping, 'ui')
775 # see logcmdutil.changesettemplater for priority of these flags
775 # see logcmdutil.changesettemplater for priority of these flags
776 if ui.debugflag:
776 if ui.debugflag:
777 return 'debug'
777 return 'debug'
778 elif ui.quiet:
778 elif ui.quiet:
779 return 'quiet'
779 return 'quiet'
780 elif ui.verbose:
780 elif ui.verbose:
781 return 'verbose'
781 return 'verbose'
782 return ''
782 return ''
783
783
784 @templatekeyword('whyunstable', requires={'repo', 'ctx'})
784 @templatekeyword('whyunstable', requires={'repo', 'ctx'})
785 def showwhyunstable(context, mapping):
785 def showwhyunstable(context, mapping):
786 """List of dicts explaining all instabilities of a changeset.
786 """List of dicts explaining all instabilities of a changeset.
787 (EXPERIMENTAL)
787 (EXPERIMENTAL)
788 """
788 """
789 repo = context.resource(mapping, 'repo')
789 repo = context.resource(mapping, 'repo')
790 ctx = context.resource(mapping, 'ctx')
790 ctx = context.resource(mapping, 'ctx')
791
791
792 def formatnode(ctx):
792 def formatnode(ctx):
793 return '%s (%s)' % (scmutil.formatchangeid(ctx), ctx.phasestr())
793 return '%s (%s)' % (scmutil.formatchangeid(ctx), ctx.phasestr())
794
794
795 entries = obsutil.whyunstable(repo, ctx)
795 entries = obsutil.whyunstable(repo, ctx)
796
796
797 for entry in entries:
797 for entry in entries:
798 if entry.get('divergentnodes'):
798 if entry.get('divergentnodes'):
799 dnodes = entry['divergentnodes']
799 dnodes = entry['divergentnodes']
800 dnhybrid = _hybrid(None, [dnode.hex() for dnode in dnodes],
800 dnhybrid = _hybrid(None, [dnode.hex() for dnode in dnodes],
801 lambda x: {'ctx': repo[x]},
801 lambda x: {'ctx': repo[x]},
802 lambda x: formatnode(repo[x]))
802 lambda x: formatnode(repo[x]))
803 entry['divergentnodes'] = dnhybrid
803 entry['divergentnodes'] = dnhybrid
804
804
805 tmpl = ('{instability}:{if(divergentnodes, " ")}{divergentnodes} '
805 tmpl = ('{instability}:{if(divergentnodes, " ")}{divergentnodes} '
806 '{reason} {node|short}')
806 '{reason} {node|short}')
807 return templateutil.mappinglist(entries, tmpl=tmpl, sep='\n')
807 return templateutil.mappinglist(entries, tmpl=tmpl, sep='\n')
808
808
809 def loadkeyword(ui, extname, registrarobj):
809 def loadkeyword(ui, extname, registrarobj):
810 """Load template keyword from specified registrarobj
810 """Load template keyword from specified registrarobj
811 """
811 """
812 for name, func in registrarobj._table.iteritems():
812 for name, func in registrarobj._table.iteritems():
813 keywords[name] = func
813 keywords[name] = func
814
814
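
A sketch of the extension side of loadkeyword() (editor's illustration): a third-party extension builds its own registrar table with the same decorator pattern used throughout this file; the keyword name 'shortnode' here is hypothetical.

from mercurial import registrar

templatekeyword = registrar.templatekeyword()

@templatekeyword('shortnode', requires={'ctx'})
def shortnode(context, mapping):
    """String. The first twelve hex digits of the changeset hash.
    (hypothetical example keyword)"""
    ctx = context.resource(mapping, 'ctx')
    return ctx.hex()[:12]

At load time Mercurial calls loadkeyword(ui, extname, templatekeyword), which copies this table into the global keywords map, as the function above shows.
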
815 # tell hggettext to extract docstrings from these functions:
815 # tell hggettext to extract docstrings from these functions:
816 i18nfunctions = keywords.values()
816 i18nfunctions = keywords.values()
@@ -1,107 +1,105 b''
1 # diffutil.py - utility functions related to diff and patch
1 # diffutil.py - utility functions related to diff and patch
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 # Copyright 2018 Octobus <octobus@octobus.net>
5 # Copyright 2018 Octobus <octobus@octobus.net>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 from ..i18n import _
12 from ..i18n import _
13
13
14 from .. import (
14 from .. import (
15 mdiff,
15 mdiff,
16 pycompat,
16 pycompat,
17 )
17 )
18
18
19 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
19 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
20 '''return diffopts with all features supported and parsed'''
20 '''return diffopts with all features supported and parsed'''
21 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
21 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
22 git=True, whitespace=True, formatchanging=True)
22 git=True, whitespace=True, formatchanging=True)
23
23
24 diffopts = diffallopts
25
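
With the diffopts alias removed above, callers spell out diffallopts(). A usage sketch (editor's illustration), assuming a ui object loaded from the default configuration:

from mercurial import ui as uimod
from mercurial.utils import diffutil

u = uimod.ui.load()
# equivalent to difffeatureopts(u, opts, git=True, whitespace=True,
# formatchanging=True)
opts = diffutil.diffallopts(u, {'git': True, 'unified': '3'})
print(opts.git)       # expected: True
print(opts.context)   # expected: 3
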
26 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
24 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
27 whitespace=False, formatchanging=False):
25 whitespace=False, formatchanging=False):
28 '''return diffopts with only opted-in features parsed
26 '''return diffopts with only opted-in features parsed
29
27
30 Features:
28 Features:
31 - git: git-style diffs
29 - git: git-style diffs
32 - whitespace: whitespace options like ignoreblanklines and ignorews
30 - whitespace: whitespace options like ignoreblanklines and ignorews
33 - formatchanging: options that will likely break or cause correctness issues
31 - formatchanging: options that will likely break or cause correctness issues
34 with most diff parsers
32 with most diff parsers
35 '''
33 '''
36 def get(key, name=None, getter=ui.configbool, forceplain=None):
34 def get(key, name=None, getter=ui.configbool, forceplain=None):
37 if opts:
35 if opts:
38 v = opts.get(key)
36 v = opts.get(key)
39 # diffopts flags are either None-default (which is passed
37 # diffopts flags are either None-default (which is passed
40 # through unchanged, so we can identify unset values), or
38 # through unchanged, so we can identify unset values), or
41 # some other falsey default (eg --unified, which defaults
39 # some other falsey default (eg --unified, which defaults
42 # to an empty string). We only want to override the config
40 # to an empty string). We only want to override the config
43 # entries from hgrc with command line values if they
41 # entries from hgrc with command line values if they
44 # appear to have been set, which is any truthy value,
42 # appear to have been set, which is any truthy value,
45 # True, or False.
43 # True, or False.
46 if v or isinstance(v, bool):
44 if v or isinstance(v, bool):
47 return v
45 return v
48 if forceplain is not None and ui.plain():
46 if forceplain is not None and ui.plain():
49 return forceplain
47 return forceplain
50 return getter(section, name or key, untrusted=untrusted)
48 return getter(section, name or key, untrusted=untrusted)
51
49
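
The override rule described in the comment above, restated as a tiny self-contained sketch (editor's illustration): a command-line value wins over the hgrc entry only when it is truthy or an explicit boolean.

def overrides(v):
    return bool(v) or isinstance(v, bool)

assert overrides(True) and overrides(False)  # explicit booleans always win
assert overrides('5')                        # a set value wins
assert not overrides('')                     # --unified's empty-string default
assert not overrides(None)                   # flag never given: fall through
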
52 # core options, expected to be understood by every diff parser
50 # core options, expected to be understood by every diff parser
53 buildopts = {
51 buildopts = {
54 'nodates': get('nodates'),
52 'nodates': get('nodates'),
55 'showfunc': get('show_function', 'showfunc'),
53 'showfunc': get('show_function', 'showfunc'),
56 'context': get('unified', getter=ui.config),
54 'context': get('unified', getter=ui.config),
57 }
55 }
58 buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
56 buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
59 buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')
57 buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')
60
58
61 if git:
59 if git:
62 buildopts['git'] = get('git')
60 buildopts['git'] = get('git')
63
61
64 # since this is in the experimental section, we need to call
62 # since this is in the experimental section, we need to call
65 # ui.configbool directly
63 # ui.configbool directly
66 buildopts['showsimilarity'] = ui.configbool('experimental',
64 buildopts['showsimilarity'] = ui.configbool('experimental',
67 'extendedheader.similarity')
65 'extendedheader.similarity')
68
66
69 # need to inspect the ui object instead of using get() since we want to
67 # need to inspect the ui object instead of using get() since we want to
70 # test for an int
68 # test for an int
71 hconf = ui.config('experimental', 'extendedheader.index')
69 hconf = ui.config('experimental', 'extendedheader.index')
72 if hconf is not None:
70 if hconf is not None:
73 hlen = None
71 hlen = None
74 try:
72 try:
75 # the hash config could be an integer (for length of hash) or a
73 # the hash config could be an integer (for length of hash) or a
76 # word (e.g. short, full, none)
74 # word (e.g. short, full, none)
77 hlen = int(hconf)
75 hlen = int(hconf)
78 if hlen < 0 or hlen > 40:
76 if hlen < 0 or hlen > 40:
79 msg = _("invalid length for extendedheader.index: '%d'\n")
77 msg = _("invalid length for extendedheader.index: '%d'\n")
80 ui.warn(msg % hlen)
78 ui.warn(msg % hlen)
81 except ValueError:
79 except ValueError:
82 # default value
80 # default value
83 if hconf == 'short' or hconf == '':
81 if hconf == 'short' or hconf == '':
84 hlen = 12
82 hlen = 12
85 elif hconf == 'full':
83 elif hconf == 'full':
86 hlen = 40
84 hlen = 40
87 elif hconf != 'none':
85 elif hconf != 'none':
88 msg = _("invalid value for extendedheader.index: '%s'\n")
86 msg = _("invalid value for extendedheader.index: '%s'\n")
89 ui.warn(msg % hconf)
87 ui.warn(msg % hconf)
90 finally:
88 finally:
91 buildopts['index'] = hlen
89 buildopts['index'] = hlen
92
90
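
A condensed sketch of the parsing above (editor's illustration; indexlen is a hypothetical helper that mirrors the try/except/finally block):

def indexlen(hconf):
    try:
        return int(hconf)        # numeric lengths pass through (out-of-range warns)
    except ValueError:
        if hconf in ('short', ''):
            return 12
        elif hconf == 'full':
            return 40
        return None              # 'none' and unrecognized words

assert indexlen('short') == 12 and indexlen('full') == 40
assert indexlen('20') == 20 and indexlen('none') is None
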
93 if whitespace:
91 if whitespace:
94 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
92 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
95 buildopts['ignorewsamount'] = get('ignore_space_change',
93 buildopts['ignorewsamount'] = get('ignore_space_change',
96 'ignorewsamount')
94 'ignorewsamount')
97 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
95 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
98 'ignoreblanklines')
96 'ignoreblanklines')
99 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
97 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
100 if formatchanging:
98 if formatchanging:
101 buildopts['text'] = opts and opts.get('text')
99 buildopts['text'] = opts and opts.get('text')
102 binary = None if opts is None else opts.get('binary')
100 binary = None if opts is None else opts.get('binary')
103 buildopts['nobinary'] = (not binary if binary is not None
101 buildopts['nobinary'] = (not binary if binary is not None
104 else get('nobinary', forceplain=False))
102 else get('nobinary', forceplain=False))
105 buildopts['noprefix'] = get('noprefix', forceplain=False)
103 buildopts['noprefix'] = get('noprefix', forceplain=False)
106
104
107 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
105 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
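
A sketch of the forceplain behaviour used for 'nobinary' and 'noprefix' above (editor's illustration): with HGPLAIN set, ui.plain() is true and both options fall back to False even if enabled in hgrc, keeping plain-mode diffs parseable. HGPLAIN must be in the environment before mercurial is imported.

import os
os.environ['HGPLAIN'] = '1'

from mercurial import ui as uimod
from mercurial.utils import diffutil

u = uimod.ui.load()
u.setconfig('diff', 'noprefix', 'yes')
print(diffutil.diffallopts(u).noprefix)   # expected: False under plain mode
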
@@ -1,211 +1,211 b''
1 from __future__ import absolute_import, print_function
1 from __future__ import absolute_import, print_function
2 import os
2 import os
3 import stat
3 import stat
4 import sys
4 import sys
5 from mercurial.node import hex
5 from mercurial.node import hex
6 from mercurial import (
6 from mercurial import (
7 context,
7 context,
8 encoding,
8 encoding,
9 hg,
9 hg,
10 scmutil,
10 scmutil,
11 ui as uimod,
11 ui as uimod,
12 )
12 )
13 from mercurial.utils import diffutil
13 from mercurial.utils import diffutil
14
14
15 print_ = print
15 print_ = print
16 def print(*args, **kwargs):
16 def print(*args, **kwargs):
17 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
17 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
18
18
19 We could also just write directly to sys.stdout.buffer the way the
19 We could also just write directly to sys.stdout.buffer the way the
20 ui object will, but this was easier for porting the test.
20 ui object will, but this was easier for porting the test.
21 """
21 """
22 print_(*args, **kwargs)
22 print_(*args, **kwargs)
23 sys.stdout.flush()
23 sys.stdout.flush()
24
24
25 def printb(data, end=b'\n'):
25 def printb(data, end=b'\n'):
26 out = getattr(sys.stdout, 'buffer', sys.stdout)
26 out = getattr(sys.stdout, 'buffer', sys.stdout)
27 out.write(data + end)
27 out.write(data + end)
28 out.flush()
28 out.flush()
29
29
30 u = uimod.ui.load()
30 u = uimod.ui.load()
31
31
32 repo = hg.repository(u, b'test1', create=1)
32 repo = hg.repository(u, b'test1', create=1)
33 os.chdir('test1')
33 os.chdir('test1')
34
34
35 # create 'foo' with fixed time stamp
35 # create 'foo' with fixed time stamp
36 f = open('foo', 'wb')
36 f = open('foo', 'wb')
37 f.write(b'foo\n')
37 f.write(b'foo\n')
38 f.close()
38 f.close()
39 os.utime('foo', (1000, 1000))
39 os.utime('foo', (1000, 1000))
40
40
41 # add+commit 'foo'
41 # add+commit 'foo'
42 repo[None].add([b'foo'])
42 repo[None].add([b'foo'])
43 repo.commit(text=b'commit1', date=b"0 0")
43 repo.commit(text=b'commit1', date=b"0 0")
44
44
45 d = repo[None][b'foo'].date()
45 d = repo[None][b'foo'].date()
46 if os.name == 'nt':
46 if os.name == 'nt':
47 d = d[:2]
47 d = d[:2]
48 print("workingfilectx.date = (%d, %d)" % d)
48 print("workingfilectx.date = (%d, %d)" % d)
49
49
50 # test memctx with non-ASCII commit message
50 # test memctx with non-ASCII commit message
51
51
52 def filectxfn(repo, memctx, path):
52 def filectxfn(repo, memctx, path):
53 return context.memfilectx(repo, memctx, b"foo", b"")
53 return context.memfilectx(repo, memctx, b"foo", b"")
54
54
55 ctx = context.memctx(repo, [b'tip', None],
55 ctx = context.memctx(repo, [b'tip', None],
56 encoding.tolocal(b"Gr\xc3\xbcezi!"),
56 encoding.tolocal(b"Gr\xc3\xbcezi!"),
57 [b"foo"], filectxfn)
57 [b"foo"], filectxfn)
58 ctx.commit()
58 ctx.commit()
59 for enc in "ASCII", "Latin-1", "UTF-8":
59 for enc in "ASCII", "Latin-1", "UTF-8":
60 encoding.encoding = enc
60 encoding.encoding = enc
61 printb(b"%-8s: %s" % (enc.encode('ascii'), repo[b"tip"].description()))
61 printb(b"%-8s: %s" % (enc.encode('ascii'), repo[b"tip"].description()))
62
62
63 # test performing a status
63 # test performing a status
64
64
65 def getfilectx(repo, memctx, f):
65 def getfilectx(repo, memctx, f):
66 fctx = memctx.parents()[0][f]
66 fctx = memctx.parents()[0][f]
67 data, flags = fctx.data(), fctx.flags()
67 data, flags = fctx.data(), fctx.flags()
68 if f == b'foo':
68 if f == b'foo':
69 data += b'bar\n'
69 data += b'bar\n'
70 return context.memfilectx(
70 return context.memfilectx(
71 repo, memctx, f, data, b'l' in flags, b'x' in flags)
71 repo, memctx, f, data, b'l' in flags, b'x' in flags)
72
72
73 ctxa = repo[0]
73 ctxa = repo[0]
74 ctxb = context.memctx(repo, [ctxa.node(), None], b"test diff", [b"foo"],
74 ctxb = context.memctx(repo, [ctxa.node(), None], b"test diff", [b"foo"],
75 getfilectx, ctxa.user(), ctxa.date())
75 getfilectx, ctxa.user(), ctxa.date())
76
76
77 print(ctxb.status(ctxa))
77 print(ctxb.status(ctxa))
78
78
79 # test performing a diff on a memctx
79 # test performing a diff on a memctx
80 diffopts = diffutil.diffopts(repo.ui, {'git': True})
80 diffopts = diffutil.diffallopts(repo.ui, {'git': True})
81 for d in ctxb.diff(ctxa, opts=diffopts):
81 for d in ctxb.diff(ctxa, opts=diffopts):
82 printb(d, end=b'')
82 printb(d, end=b'')
83
83
84 # test safeness and correctness of "ctx.status()"
84 # test safeness and correctness of "ctx.status()"
85 print('= checking context.status():')
85 print('= checking context.status():')
86
86
87 # ancestor "wcctx ~ 2"
87 # ancestor "wcctx ~ 2"
88 actx2 = repo[b'.']
88 actx2 = repo[b'.']
89
89
90 repo.wwrite(b'bar-m', b'bar-m\n', b'')
90 repo.wwrite(b'bar-m', b'bar-m\n', b'')
91 repo.wwrite(b'bar-r', b'bar-r\n', b'')
91 repo.wwrite(b'bar-r', b'bar-r\n', b'')
92 repo[None].add([b'bar-m', b'bar-r'])
92 repo[None].add([b'bar-m', b'bar-r'])
93 repo.commit(text=b'add bar-m, bar-r', date=b"0 0")
93 repo.commit(text=b'add bar-m, bar-r', date=b"0 0")
94
94
95 # ancestor "wcctx ~ 1"
95 # ancestor "wcctx ~ 1"
96 actx1 = repo[b'.']
96 actx1 = repo[b'.']
97
97
98 repo.wwrite(b'bar-m', b'bar-m bar-m\n', b'')
98 repo.wwrite(b'bar-m', b'bar-m bar-m\n', b'')
99 repo.wwrite(b'bar-a', b'bar-a\n', b'')
99 repo.wwrite(b'bar-a', b'bar-a\n', b'')
100 repo[None].add([b'bar-a'])
100 repo[None].add([b'bar-a'])
101 repo[None].forget([b'bar-r'])
101 repo[None].forget([b'bar-r'])
102
102
103 # status at this point:
103 # status at this point:
104 # M bar-m
104 # M bar-m
105 # A bar-a
105 # A bar-a
106 # R bar-r
106 # R bar-r
107 # C foo
107 # C foo
108
108
109 from mercurial import scmutil
109 from mercurial import scmutil
110
110
111 print('== checking workingctx.status:')
111 print('== checking workingctx.status:')
112
112
113 wctx = repo[None]
113 wctx = repo[None]
114 print('wctx._status=%s' % (str(wctx._status)))
114 print('wctx._status=%s' % (str(wctx._status)))
115
115
116 print('=== with "pattern match":')
116 print('=== with "pattern match":')
117 print(actx1.status(other=wctx,
117 print(actx1.status(other=wctx,
118 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
118 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
119 print('wctx._status=%s' % (str(wctx._status)))
119 print('wctx._status=%s' % (str(wctx._status)))
120 print(actx2.status(other=wctx,
120 print(actx2.status(other=wctx,
121 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
121 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
122 print('wctx._status=%s' % (str(wctx._status)))
122 print('wctx._status=%s' % (str(wctx._status)))
123
123
124 print('=== with "always match" and "listclean=True":')
124 print('=== with "always match" and "listclean=True":')
125 print(actx1.status(other=wctx, listclean=True))
125 print(actx1.status(other=wctx, listclean=True))
126 print('wctx._status=%s' % (str(wctx._status)))
126 print('wctx._status=%s' % (str(wctx._status)))
127 print(actx2.status(other=wctx, listclean=True))
127 print(actx2.status(other=wctx, listclean=True))
128 print('wctx._status=%s' % (str(wctx._status)))
128 print('wctx._status=%s' % (str(wctx._status)))
129
129
130 print("== checking workingcommitctx.status:")
130 print("== checking workingcommitctx.status:")
131
131
132 wcctx = context.workingcommitctx(repo,
132 wcctx = context.workingcommitctx(repo,
133 scmutil.status([b'bar-m'],
133 scmutil.status([b'bar-m'],
134 [b'bar-a'],
134 [b'bar-a'],
135 [],
135 [],
136 [], [], [], []),
136 [], [], [], []),
137 text=b'', date=b'0 0')
137 text=b'', date=b'0 0')
138 print('wcctx._status=%s' % (str(wcctx._status)))
138 print('wcctx._status=%s' % (str(wcctx._status)))
139
139
140 print('=== with "always match":')
140 print('=== with "always match":')
141 print(actx1.status(other=wcctx))
141 print(actx1.status(other=wcctx))
142 print('wcctx._status=%s' % (str(wcctx._status)))
142 print('wcctx._status=%s' % (str(wcctx._status)))
143 print(actx2.status(other=wcctx))
143 print(actx2.status(other=wcctx))
144 print('wcctx._status=%s' % (str(wcctx._status)))
144 print('wcctx._status=%s' % (str(wcctx._status)))
145
145
146 print('=== with "always match" and "listclean=True":')
146 print('=== with "always match" and "listclean=True":')
147 print(actx1.status(other=wcctx, listclean=True))
147 print(actx1.status(other=wcctx, listclean=True))
148 print('wcctx._status=%s' % (str(wcctx._status)))
148 print('wcctx._status=%s' % (str(wcctx._status)))
149 print(actx2.status(other=wcctx, listclean=True))
149 print(actx2.status(other=wcctx, listclean=True))
150 print('wcctx._status=%s' % (str(wcctx._status)))
150 print('wcctx._status=%s' % (str(wcctx._status)))
151
151
152 print('=== with "pattern match":')
152 print('=== with "pattern match":')
153 print(actx1.status(other=wcctx,
153 print(actx1.status(other=wcctx,
154 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
154 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
155 print('wcctx._status=%s' % (str(wcctx._status)))
155 print('wcctx._status=%s' % (str(wcctx._status)))
156 print(actx2.status(other=wcctx,
156 print(actx2.status(other=wcctx,
157 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
157 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
158 print('wcctx._status=%s' % (str(wcctx._status)))
158 print('wcctx._status=%s' % (str(wcctx._status)))
159
159
160 print('=== with "pattern match" and "listclean=True":')
160 print('=== with "pattern match" and "listclean=True":')
161 print(actx1.status(other=wcctx,
161 print(actx1.status(other=wcctx,
162 match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
162 match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
163 listclean=True))
163 listclean=True))
164 print('wcctx._status=%s' % (str(wcctx._status)))
164 print('wcctx._status=%s' % (str(wcctx._status)))
165 print(actx2.status(other=wcctx,
165 print(actx2.status(other=wcctx,
166 match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
166 match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
167 listclean=True))
167 listclean=True))
168 print('wcctx._status=%s' % (str(wcctx._status)))
168 print('wcctx._status=%s' % (str(wcctx._status)))
169
169
170 os.chdir('..')
170 os.chdir('..')
171
171
172 # test manifestlog being changed
172 # test manifestlog being changed
173 print('== commit with manifestlog invalidated')
173 print('== commit with manifestlog invalidated')
174
174
175 repo = hg.repository(u, b'test2', create=1)
175 repo = hg.repository(u, b'test2', create=1)
176 os.chdir('test2')
176 os.chdir('test2')
177
177
178 # make some commits
178 # make some commits
179 for i in [b'1', b'2', b'3']:
179 for i in [b'1', b'2', b'3']:
180 with open(i, 'wb') as f:
180 with open(i, 'wb') as f:
181 f.write(i)
181 f.write(i)
182 status = scmutil.status([], [i], [], [], [], [], [])
182 status = scmutil.status([], [i], [], [], [], [], [])
183 ctx = context.workingcommitctx(repo, status, text=i, user=b'test@test.com',
183 ctx = context.workingcommitctx(repo, status, text=i, user=b'test@test.com',
184 date=(0, 0))
184 date=(0, 0))
185 ctx.p1().manifest() # side effect: cache manifestctx
185 ctx.p1().manifest() # side effect: cache manifestctx
186 n = repo.commitctx(ctx)
186 n = repo.commitctx(ctx)
187 printb(b'commit %s: %s' % (i, hex(n)))
187 printb(b'commit %s: %s' % (i, hex(n)))
188
188
189 # touch 00manifest.i mtime so storecache could expire.
189 # touch 00manifest.i mtime so storecache could expire.
190 # repo.__dict__['manifestlog'] is deleted by transaction releasefn.
190 # repo.__dict__['manifestlog'] is deleted by transaction releasefn.
191 st = repo.svfs.stat(b'00manifest.i')
191 st = repo.svfs.stat(b'00manifest.i')
192 repo.svfs.utime(b'00manifest.i',
192 repo.svfs.utime(b'00manifest.i',
193 (st[stat.ST_MTIME] + 1, st[stat.ST_MTIME] + 1))
193 (st[stat.ST_MTIME] + 1, st[stat.ST_MTIME] + 1))
194
194
195 # read the file just committed
195 # read the file just committed
196 try:
196 try:
197 if repo[n][i].data() != i:
197 if repo[n][i].data() != i:
198 print('data mismatch')
198 print('data mismatch')
199 except Exception as ex:
199 except Exception as ex:
200 print('cannot read data: %r' % ex)
200 print('cannot read data: %r' % ex)
201
201
202 with repo.wlock(), repo.lock(), repo.transaction(b'test'):
202 with repo.wlock(), repo.lock(), repo.transaction(b'test'):
203 with open(b'4', 'wb') as f:
203 with open(b'4', 'wb') as f:
204 f.write(b'4')
204 f.write(b'4')
205 repo.dirstate.normal(b'4')
205 repo.dirstate.normal(b'4')
206 repo.commit(b'4')
206 repo.commit(b'4')
207 revsbefore = len(repo.changelog)
207 revsbefore = len(repo.changelog)
208 repo.invalidate(clearfilecache=True)
208 repo.invalidate(clearfilecache=True)
209 revsafter = len(repo.changelog)
209 revsafter = len(repo.changelog)
210 if revsbefore != revsafter:
210 if revsbefore != revsafter:
211 print('changeset lost by repo.invalidate()')
211 print('changeset lost by repo.invalidate()')