diffutil: move the module out of utils package...
Yuya Nishihara
r38607:1c93e023 @70 default
diff --git a/contrib/synthrepo.py b/contrib/synthrepo.py
@@ -1,521 +1,521 @@
1 1 # synthrepo.py - repo synthesis
2 2 #
3 3 # Copyright 2012 Facebook
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''synthesize structurally interesting change history
9 9
10 10 This extension is useful for creating a repository with properties
11 11 that are statistically similar to an existing repository. During
12 12 analysis, a simple probability table is constructed from the history
13 13 of an existing repository. During synthesis, these properties are
14 14 reconstructed.
15 15
16 16 Properties that are analyzed and synthesized include the following:
17 17
18 18 - Lines added or removed when an existing file is modified
19 19 - Number and sizes of files added
20 20 - Number of files removed
21 21 - Line lengths
22 22 - Topological distance to parent changeset(s)
23 23 - Probability of a commit being a merge
24 24 - Probability of a newly added file being added to a new directory
25 25 - Interarrival time, and time zone, of commits
26 26 - Number of files in each directory
27 27
28 28 A few obvious properties that are not currently handled realistically:
29 29
30 30 - Merges are treated as regular commits with two parents, which is not
31 31 realistic
32 32 - Modifications are not treated as operations on hunks of lines, but
33 33 as insertions and deletions of randomly chosen single lines
34 34 - Committer ID (always random)
35 35 - Executability of files
36 36 - Symlinks and binary files are ignored
37 37 '''
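# A possible end-to-end workflow (illustrative sketch; the paths and option
# values below are assumptions, not part of this change):
#
#   $ hg --config extensions.synthrepo=contrib/synthrepo.py analyze -o model.json
#   $ hg init ../synthetic && cd ../synthetic
#   $ hg --config extensions.synthrepo=contrib/synthrepo.py synthesize \
#         --initfiles 1000 --count 500 ../model.json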
38 38
39 39 from __future__ import absolute_import
40 40 import bisect
41 41 import collections
42 42 import itertools
43 43 import json
44 44 import os
45 45 import random
46 46 import sys
47 47 import time
48 48
49 49 from mercurial.i18n import _
50 50 from mercurial.node import (
51        hex,
51 51     nullid,
52 52     nullrev,
53 53     short,
54 54 )
55 55 from mercurial import (
56 56 context,
57 diffutil,
57 58 error,
58 59 hg,
59 60 patch,
60 61 registrar,
61 62 scmutil,
62 63 )
63 64 from mercurial.utils import (
64 65 dateutil,
65 diffutil,
66 66 )
67 67
68 68 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
69 69 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
70 70 # be specifying the version(s) of Mercurial they are tested with, or
71 71 # leave the attribute unspecified.
72 72 testedwith = 'ships-with-hg-core'
73 73
74 74 cmdtable = {}
75 75 command = registrar.command(cmdtable)
76 76
77 77 newfile = {'new fi', 'rename', 'copy f', 'copy t'}
78 78
79 79 def zerodict():
80 80 return collections.defaultdict(lambda: 0)
81 81
82 82 def roundto(x, k):
83 83 if x > k * 2:
84 84 return int(round(x / float(k)) * k)
85 85 return int(round(x))
86 86
87 87 def parsegitdiff(lines):
88 88 filename, mar, lineadd, lineremove = None, None, zerodict(), 0
89 89 binary = False
90 90 for line in lines:
91 91 start = line[:6]
92 92 if start == 'diff -':
93 93 if filename:
94 94 yield filename, mar, lineadd, lineremove, binary
95 95 mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
96 96 filename = patch.gitre.match(line).group(1)
97 97 elif start in newfile:
98 98 mar = 'a'
99 99 elif start == 'GIT bi':
100 100 binary = True
101 101 elif start == 'delete':
102 102 mar = 'r'
103 103 elif start:
104 104 s = start[0]
105 105 if s == '-' and not line.startswith('--- '):
106 106 lineremove += 1
107 107 elif s == '+' and not line.startswith('+++ '):
108 108 lineadd[roundto(len(line) - 1, 5)] += 1
109 109 if filename:
110 110 yield filename, mar, lineadd, lineremove, binary
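# Worked example (illustrative): feeding parsegitdiff() the lines of a
# one-hunk git diff whose only change is the added line '+hello world'
# yields a single tuple roughly of the form ('f', 'm', {10: 1}, 0, False):
# the 11 content characters are bucketed to the nearest multiple of 5 by
# roundto(), so the length-10 bucket of lineadd is incremented (lineadd is
# actually a defaultdict).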
111 111
112 112 @command('analyze',
113 113 [('o', 'output', '', _('write output to given file'), _('FILE')),
114 114 ('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
115 115 _('hg analyze'), optionalrepo=True)
116 116 def analyze(ui, repo, *revs, **opts):
117 117 '''create a simple model of a repository to use for later synthesis
118 118
119 119 This command examines every changeset in the given range (or all
120 120 of history if none are specified) and creates a simple statistical
121 121 model of the history of the repository. It also measures the directory
122 122 structure of the repository as checked out.
123 123
124 124 The model is written out to a JSON file, and can be used by
125 125 :hg:`synthesize` to create or augment a repository with synthetic
126 126 commits that have a structure that is statistically similar to the
127 127 analyzed repository.
128 128 '''
129 129 root = repo.root
130 130 if not root.endswith(os.path.sep):
131 131 root += os.path.sep
132 132
133 133 revs = list(revs)
134 134 revs.extend(opts['rev'])
135 135 if not revs:
136 136 revs = [':']
137 137
138 138 output = opts['output']
139 139 if not output:
140 140 output = os.path.basename(root) + '.json'
141 141
142 142 if output == '-':
143 143 fp = sys.stdout
144 144 else:
145 145 fp = open(output, 'w')
146 146
147 147 # Always obtain file counts of each directory in the given root directory.
148 148 def onerror(e):
149 149 ui.warn(_('error walking directory structure: %s\n') % e)
150 150
151 151 dirs = {}
152 152 rootprefixlen = len(root)
153 153 for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
154 154 dirpathfromroot = dirpath[rootprefixlen:]
155 155 dirs[dirpathfromroot] = len(filenames)
156 156 if '.hg' in dirnames:
157 157 dirnames.remove('.hg')
158 158
159 159 lineschanged = zerodict()
160 160 children = zerodict()
161 161 p1distance = zerodict()
162 162 p2distance = zerodict()
163 163 linesinfilesadded = zerodict()
164 164 fileschanged = zerodict()
165 165 filesadded = zerodict()
166 166 filesremoved = zerodict()
167 167 linelengths = zerodict()
168 168 interarrival = zerodict()
169 169 parents = zerodict()
170 170 dirsadded = zerodict()
171 171 tzoffset = zerodict()
172 172
173 173 # If a mercurial repo is available, also model the commit history.
174 174 if repo:
175 175 revs = scmutil.revrange(repo, revs)
176 176 revs.sort()
177 177
178 178 progress = ui.makeprogress(_('analyzing'), unit=_('changesets'),
179 179 total=len(revs))
180 180 for i, rev in enumerate(revs):
181 181 progress.update(i)
182 182 ctx = repo[rev]
183 183 pl = ctx.parents()
184 184 pctx = pl[0]
185 185 prev = pctx.rev()
186 186 children[prev] += 1
187 187 p1distance[rev - prev] += 1
188 188 parents[len(pl)] += 1
189 189 tzoffset[ctx.date()[1]] += 1
190 190 if len(pl) > 1:
191 191 p2distance[rev - pl[1].rev()] += 1
192 192 if prev == rev - 1:
193 193 lastctx = pctx
194 194 else:
195 195 lastctx = repo[rev - 1]
196 196 if lastctx.rev() != nullrev:
197 197 timedelta = ctx.date()[0] - lastctx.date()[0]
198 198 interarrival[roundto(timedelta, 300)] += 1
199 199 diffopts = diffutil.diffallopts(ui, {'git': True})
200 200 diff = sum((d.splitlines()
201 201 for d in ctx.diff(pctx, opts=diffopts)), [])
202 202 fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
203 203 for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
204 204 if isbin:
205 205 continue
206 206 added = sum(lineadd.itervalues(), 0)
207 207 if mar == 'm':
208 208 if added and lineremove:
209 209 lineschanged[roundto(added, 5),
210 210 roundto(lineremove, 5)] += 1
211 211 filechanges += 1
212 212 elif mar == 'a':
213 213 fileadds += 1
214 214 if '/' in filename:
215 215 filedir = filename.rsplit('/', 1)[0]
216 216 if filedir not in pctx.dirs():
217 217 diradds += 1
218 218 linesinfilesadded[roundto(added, 5)] += 1
219 219 elif mar == 'r':
220 220 fileremoves += 1
221 221 for length, count in lineadd.iteritems():
222 222 linelengths[length] += count
223 223 fileschanged[filechanges] += 1
224 224 filesadded[fileadds] += 1
225 225 dirsadded[diradds] += 1
226 226 filesremoved[fileremoves] += 1
227 227 progress.complete()
228 228
229 229 invchildren = zerodict()
230 230
231 231 for rev, count in children.iteritems():
232 232 invchildren[count] += 1
233 233
234 234 if output != '-':
235 235 ui.status(_('writing output to %s\n') % output)
236 236
237 237 def pronk(d):
238 238 return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)
239 239
240 240 json.dump({'revs': len(revs),
241 241 'initdirs': pronk(dirs),
242 242 'lineschanged': pronk(lineschanged),
243 243 'children': pronk(invchildren),
244 244 'fileschanged': pronk(fileschanged),
245 245 'filesadded': pronk(filesadded),
246 246 'linesinfilesadded': pronk(linesinfilesadded),
247 247 'dirsadded': pronk(dirsadded),
248 248 'filesremoved': pronk(filesremoved),
249 249 'linelengths': pronk(linelengths),
250 250 'parents': pronk(parents),
251 251 'p1distance': pronk(p1distance),
252 252 'p2distance': pronk(p2distance),
253 253 'interarrival': pronk(interarrival),
254 254 'tzoffset': pronk(tzoffset),
255 255 },
256 256 fp)
257 257 fp.close()
258 258
259 259 @command('synthesize',
260 260 [('c', 'count', 0, _('create given number of commits'), _('COUNT')),
261 261 ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
262 262 ('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
263 263 _('hg synthesize [OPTION].. DESCFILE'))
264 264 def synthesize(ui, repo, descpath, **opts):
265 265 '''synthesize commits based on a model of an existing repository
266 266
267 267 The model must have been generated by :hg:`analyze`. Commits will
268 268 be generated randomly according to the probabilities described in
269 269 the model. If --initfiles is set, the repository will be seeded with
270 270     the given number of files, following the modeled repository's directory
271 271 structure.
272 272
273 273 When synthesizing new content, commit descriptions, and user
274 274 names, words will be chosen randomly from a dictionary that is
275 275 presumed to contain one word per line. Use --dict to specify the
276 276 path to an alternate dictionary to use.
277 277 '''
278 278 try:
279 279 fp = hg.openpath(ui, descpath)
280 280 except Exception as err:
281 281         raise error.Abort('%s: %s' % (descpath, err))
282 282 desc = json.load(fp)
283 283 fp.close()
284 284
285 285 def cdf(l):
286 286 if not l:
287 287 return [], []
288 288 vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
289 289 t = float(sum(probs, 0))
290 290 s, cdfs = 0, []
291 291 for v in probs:
292 292 s += v
293 293 cdfs.append(s / t)
294 294 return vals, cdfs
295 295
296 296 lineschanged = cdf(desc['lineschanged'])
297 297 fileschanged = cdf(desc['fileschanged'])
298 298 filesadded = cdf(desc['filesadded'])
299 299 dirsadded = cdf(desc['dirsadded'])
300 300 filesremoved = cdf(desc['filesremoved'])
301 301 linelengths = cdf(desc['linelengths'])
302 302 parents = cdf(desc['parents'])
303 303 p1distance = cdf(desc['p1distance'])
304 304 p2distance = cdf(desc['p2distance'])
305 305 interarrival = cdf(desc['interarrival'])
306 306 linesinfilesadded = cdf(desc['linesinfilesadded'])
307 307 tzoffset = cdf(desc['tzoffset'])
308 308
309 309 dictfile = opts.get('dict') or '/usr/share/dict/words'
310 310 try:
311 311 fp = open(dictfile, 'rU')
312 312 except IOError as err:
313 313 raise error.Abort('%s: %s' % (dictfile, err.strerror))
314 314 words = fp.read().splitlines()
315 315 fp.close()
316 316
317 317 initdirs = {}
318 318 if desc['initdirs']:
319 319 for k, v in desc['initdirs']:
320 320 initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
321 321 initdirs = renamedirs(initdirs, words)
322 322 initdirscdf = cdf(initdirs)
323 323
324 324 def pick(cdf):
325 325 return cdf[0][bisect.bisect_left(cdf[1], random.random())]
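    # Sampling sketch (illustrative): cdf() sorts (value, count) pairs by
    # descending count and returns (values, cumulative probabilities), e.g.
    #
    #   vals, cdfs = cdf([('a', 3), ('b', 1)])   # ('a', 'b'), [0.75, 1.0]
    #
    # pick((vals, cdfs)) then returns 'a' about 75% of the time and 'b'
    # about 25%, by bisecting a uniform random number into the cdf.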
326 326
327 327 def pickpath():
328 328 return os.path.join(pick(initdirscdf), random.choice(words))
329 329
330 330 def makeline(minimum=0):
331 331 total = max(minimum, pick(linelengths))
332 332 c, l = 0, []
333 333 while c < total:
334 334 w = random.choice(words)
335 335 c += len(w) + 1
336 336 l.append(w)
337 337 return ' '.join(l)
338 338
339 339 wlock = repo.wlock()
340 340 lock = repo.lock()
341 341
342 342 nevertouch = {'.hgsub', '.hgignore', '.hgtags'}
343 343
344 344 _synthesizing = _('synthesizing')
345 345 _files = _('initial files')
346 346 _changesets = _('changesets')
347 347
348 348 # Synthesize a single initial revision adding files to the repo according
349 349 # to the modeled directory structure.
350 350 initcount = int(opts['initfiles'])
351 351 if initcount and initdirs:
352 352 pctx = repo[None].parents()[0]
353 353 dirs = set(pctx.dirs())
354 354 files = {}
355 355
356 356 def validpath(path):
357 357 # Don't pick filenames which are already directory names.
358 358 if path in dirs:
359 359 return False
360 360 # Don't pick directories which were used as file names.
361 361 while path:
362 362 if path in files:
363 363 return False
364 364 path = os.path.dirname(path)
365 365 return True
366 366
367 367 progress = ui.makeprogress(_synthesizing, unit=_files, total=initcount)
368 368 for i in xrange(0, initcount):
369 369 progress.update(i)
370 370
371 371 path = pickpath()
372 372 while not validpath(path):
373 373 path = pickpath()
374 374 data = '%s contents\n' % path
375 375 files[path] = data
376 376 dir = os.path.dirname(path)
377 377 while dir and dir not in dirs:
378 378 dirs.add(dir)
379 379 dir = os.path.dirname(dir)
380 380
381 381 def filectxfn(repo, memctx, path):
382 382 return context.memfilectx(repo, memctx, path, files[path])
383 383
384 384 progress.complete()
385 385 message = 'synthesized wide repo with %d files' % (len(files),)
386 386 mc = context.memctx(repo, [pctx.node(), nullid], message,
387 387 files, filectxfn, ui.username(),
388 388 '%d %d' % dateutil.makedate())
389 389 initnode = mc.commit()
390 390 if ui.debugflag:
391 391 hexfn = hex
392 392 else:
393 393 hexfn = short
394 394 ui.status(_('added commit %s with %d files\n')
395 395 % (hexfn(initnode), len(files)))
396 396
397 397 # Synthesize incremental revisions to the repository, adding repo depth.
398 398 count = int(opts['count'])
399 399 heads = set(map(repo.changelog.rev, repo.heads()))
400 400 progress = ui.makeprogress(_synthesizing, unit=_changesets, total=count)
401 401 for i in xrange(count):
402 402 progress.update(i)
403 403
404 404 node = repo.changelog.node
405 405 revs = len(repo)
406 406
407 407 def pickhead(heads, distance):
408 408 if heads:
409 409 lheads = sorted(heads)
410 410 rev = revs - min(pick(distance), revs)
411 411 if rev < lheads[-1]:
412 412 rev = lheads[bisect.bisect_left(lheads, rev)]
413 413 else:
414 414 rev = lheads[-1]
415 415 return rev, node(rev)
416 416 return nullrev, nullid
417 417
418 418 r1 = revs - min(pick(p1distance), revs)
419 419 p1 = node(r1)
420 420
421 421 # the number of heads will grow without bound if we use a pure
422 422 # model, so artificially constrain their proliferation
423 423 toomanyheads = len(heads) > random.randint(1, 20)
424 424 if p2distance[0] and (pick(parents) == 2 or toomanyheads):
425 425 r2, p2 = pickhead(heads.difference([r1]), p2distance)
426 426 else:
427 427 r2, p2 = nullrev, nullid
428 428
429 429 pl = [p1, p2]
430 430 pctx = repo[r1]
431 431 mf = pctx.manifest()
432 432 mfk = mf.keys()
433 433 changes = {}
434 434 if mfk:
435 435 for __ in xrange(pick(fileschanged)):
436 436 for __ in xrange(10):
437 437 fctx = pctx.filectx(random.choice(mfk))
438 438 path = fctx.path()
439 439 if not (path in nevertouch or fctx.isbinary() or
440 440 'l' in fctx.flags()):
441 441 break
442 442 lines = fctx.data().splitlines()
443 443 add, remove = pick(lineschanged)
444 444 for __ in xrange(remove):
445 445 if not lines:
446 446 break
447 447 del lines[random.randrange(0, len(lines))]
448 448 for __ in xrange(add):
449 449 lines.insert(random.randint(0, len(lines)), makeline())
450 450 path = fctx.path()
451 451 changes[path] = '\n'.join(lines) + '\n'
452 452 for __ in xrange(pick(filesremoved)):
453 453 path = random.choice(mfk)
454 454 for __ in xrange(10):
455 455 path = random.choice(mfk)
456 456 if path not in changes:
457 457 break
458 458 if filesadded:
459 459 dirs = list(pctx.dirs())
460 460 dirs.insert(0, '')
461 461 for __ in xrange(pick(filesadded)):
462 462 pathstr = ''
463 463 while pathstr in dirs:
464 464 path = [random.choice(dirs)]
465 465 if pick(dirsadded):
466 466 path.append(random.choice(words))
467 467 path.append(random.choice(words))
468 468 pathstr = '/'.join(filter(None, path))
469 469 data = '\n'.join(makeline()
470 470 for __ in xrange(pick(linesinfilesadded))) + '\n'
471 471 changes[pathstr] = data
472 472 def filectxfn(repo, memctx, path):
473 473 if path not in changes:
474 474 return None
475 475 return context.memfilectx(repo, memctx, path, changes[path])
476 476 if not changes:
477 477 continue
478 478 if revs:
479 479 date = repo['tip'].date()[0] + pick(interarrival)
480 480 else:
481 481 date = time.time() - (86400 * count)
482 482         # dates in mercurial must be positive and fit in 32-bit signed integers.
483 483 date = min(0x7fffffff, max(0, date))
484 484 user = random.choice(words) + '@' + random.choice(words)
485 485 mc = context.memctx(repo, pl, makeline(minimum=2),
486 486 sorted(changes),
487 487 filectxfn, user, '%d %d' % (date, pick(tzoffset)))
488 488 newnode = mc.commit()
489 489 heads.add(repo.changelog.rev(newnode))
490 490 heads.discard(r1)
491 491 heads.discard(r2)
492 492 progress.complete()
493 493
494 494 lock.release()
495 495 wlock.release()
496 496
497 497 def renamedirs(dirs, words):
498 498 '''Randomly rename the directory names in the per-dir file count dict.'''
499 499 wordgen = itertools.cycle(words)
500 500 replacements = {'': ''}
501 501 def rename(dirpath):
502 502 '''Recursively rename the directory and all path prefixes.
503 503
504 504 The mapping from path to renamed path is stored for all path prefixes
505 505 as in dynamic programming, ensuring linear runtime and consistent
506 506 renaming regardless of iteration order through the model.
507 507 '''
508 508 if dirpath in replacements:
509 509 return replacements[dirpath]
510 510 head, _ = os.path.split(dirpath)
511 511 if head:
512 512 head = rename(head)
513 513 else:
514 514 head = ''
515 515 renamed = os.path.join(head, next(wordgen))
516 516 replacements[dirpath] = renamed
517 517 return renamed
518 518 result = []
519 519 for dirpath, count in dirs.iteritems():
520 520 result.append([rename(dirpath.lstrip(os.sep)), count])
521 521 return result
diff --git a/mercurial/utils/diffutil.py b/mercurial/diffutil.py
@@ -1,105 +1,105 @@
1 1 # diffutil.py - utility functions related to diff and patch
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 # Copyright 2018 Octobus <octobus@octobus.net>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 from ..i18n import _
12 from .i18n import _
13 13
14 from .. import (
14 from . import (
15 15 mdiff,
16 16 pycompat,
17 17 )
18 18
19 19 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
20 20 '''return diffopts with all features supported and parsed'''
21 21 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
22 22 git=True, whitespace=True, formatchanging=True)
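# For instance, synthrepo (updated in this same changeset) requests
# git-style diffs for its analysis pass with:
#
#   diffopts = diffutil.diffallopts(ui, {'git': True})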
23 23
24 24 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
25 25 whitespace=False, formatchanging=False):
26 26 '''return diffopts with only opted-in features parsed
27 27
28 28 Features:
29 29 - git: git-style diffs
30 30 - whitespace: whitespace options like ignoreblanklines and ignorews
31 31 - formatchanging: options that will likely break or cause correctness issues
32 32 with most diff parsers
33 33 '''
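    # For example (a sketch), hgweb builds whitespace-aware diff options for
    # a given config section with:
    #
    #   diffopts = difffeatureopts(ui, untrusted=True, section='annotate',
    #                              whitespace=True)
    #
    # leaving the git and format-changing features at their defaults.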
34 34 def get(key, name=None, getter=ui.configbool, forceplain=None):
35 35 if opts:
36 36 v = opts.get(key)
37 37 # diffopts flags are either None-default (which is passed
38 38 # through unchanged, so we can identify unset values), or
39 39 # some other falsey default (eg --unified, which defaults
40 40 # to an empty string). We only want to override the config
41 41 # entries from hgrc with command line values if they
42 42 # appear to have been set, which is any truthy value,
43 43 # True, or False.
44 44 if v or isinstance(v, bool):
45 45 return v
46 46 if forceplain is not None and ui.plain():
47 47 return forceplain
48 48 return getter(section, name or key, untrusted=untrusted)
49 49
50 50 # core options, expected to be understood by every diff parser
51 51 buildopts = {
52 52 'nodates': get('nodates'),
53 53 'showfunc': get('show_function', 'showfunc'),
54 54 'context': get('unified', getter=ui.config),
55 55 }
56 56 buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
57 57 buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')
58 58
59 59 if git:
60 60 buildopts['git'] = get('git')
61 61
62 62 # since this is in the experimental section, we need to call
63 63         # ui.configbool directly
64 64 buildopts['showsimilarity'] = ui.configbool('experimental',
65 65 'extendedheader.similarity')
66 66
67 67 # need to inspect the ui object instead of using get() since we want to
68 68 # test for an int
69 69 hconf = ui.config('experimental', 'extendedheader.index')
70 70 if hconf is not None:
71 71 hlen = None
72 72 try:
73 73 # the hash config could be an integer (for length of hash) or a
74 74 # word (e.g. short, full, none)
75 75 hlen = int(hconf)
76 76 if hlen < 0 or hlen > 40:
77 77 msg = _("invalid length for extendedheader.index: '%d'\n")
78 78 ui.warn(msg % hlen)
79 79 except ValueError:
80 80 # default value
81 81 if hconf == 'short' or hconf == '':
82 82 hlen = 12
83 83 elif hconf == 'full':
84 84 hlen = 40
85 85 elif hconf != 'none':
86 86 msg = _("invalid value for extendedheader.index: '%s'\n")
87 87 ui.warn(msg % hconf)
88 88 finally:
89 89 buildopts['index'] = hlen
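        # Net effect (illustrative): '' or 'short' -> 12, 'full' -> 40,
        # 'none' -> index disabled (None); an out-of-range integer such as
        # 50 warns but is still used, while an unrecognized word warns and
        # leaves the index unset.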
90 90
91 91 if whitespace:
92 92 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
93 93 buildopts['ignorewsamount'] = get('ignore_space_change',
94 94 'ignorewsamount')
95 95 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
96 96 'ignoreblanklines')
97 97 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
98 98 if formatchanging:
99 99 buildopts['text'] = opts and opts.get('text')
100 100 binary = None if opts is None else opts.get('binary')
101 101 buildopts['nobinary'] = (not binary if binary is not None
102 102 else get('nobinary', forceplain=False))
103 103 buildopts['noprefix'] = get('noprefix', forceplain=False)
104 104
105 105 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
diff --git a/mercurial/hgweb/webutil.py b/mercurial/hgweb/webutil.py
@@ -1,806 +1,806 @@
1 1 # hgweb/webutil.py - utility library for the web interface.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import difflib
13 13 import os
14 14 import re
15 15
16 16 from ..i18n import _
17 17 from ..node import hex, nullid, short
18 18
19 19 from .common import (
20 20 ErrorResponse,
21 21 HTTP_BAD_REQUEST,
22 22 HTTP_NOT_FOUND,
23 23 paritygen,
24 24 )
25 25
26 26 from .. import (
27 27 context,
28 diffutil,
28 29 error,
29 30 match,
30 31 mdiff,
31 32 obsutil,
32 33 patch,
33 34 pathutil,
34 35 pycompat,
35 36 scmutil,
36 37 templatefilters,
37 38 templatekw,
38 39 templateutil,
39 40 ui as uimod,
40 41 util,
41 42 )
42 43
43 44 from ..utils import (
44 diffutil,
45 45 stringutil,
46 46 )
47 47
48 48 archivespecs = util.sortdict((
49 49 ('zip', ('application/zip', 'zip', '.zip', None)),
50 50 ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
51 51 ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)),
52 52 ))
53 53
54 54 def archivelist(ui, nodeid, url=None):
55 55 allowed = ui.configlist('web', 'allow-archive', untrusted=True)
56 56 archives = []
57 57
58 58 for typ, spec in archivespecs.iteritems():
59 59 if typ in allowed or ui.configbool('web', 'allow' + typ,
60 60 untrusted=True):
61 61 archives.append({
62 62 'type': typ,
63 63 'extension': spec[2],
64 64 'node': nodeid,
65 65 'url': url,
66 66 })
67 67
68 68 return templateutil.mappinglist(archives)
69 69
70 70 def up(p):
71 71 if p[0:1] != "/":
72 72 p = "/" + p
73 73 if p[-1:] == "/":
74 74 p = p[:-1]
75 75 up = os.path.dirname(p)
76 76 if up == "/":
77 77 return "/"
78 78 return up + "/"
79 79
80 80 def _navseq(step, firststep=None):
81 81 if firststep:
82 82 yield firststep
83 83 if firststep >= 20 and firststep <= 40:
84 84 firststep = 50
85 85 yield firststep
86 86 assert step > 0
87 87 assert firststep > 0
88 88 while step <= firststep:
89 89 step *= 10
90 90 while True:
91 91 yield 1 * step
92 92 yield 3 * step
93 93 step *= 10
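# For instance (illustrative), _navseq(1, 10) yields 10, 100, 300, 1000,
# 3000, ... and _navseq(1, 30) yields 30, 50, 100, 300, 1000, ...: the
# first page size, then increasingly coarse jumps for navigation links.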
94 94
95 95 class revnav(object):
96 96
97 97 def __init__(self, repo):
98 98 """Navigation generation object
99 99
100 100 :repo: repo object we generate nav for
101 101 """
102 102 # used for hex generation
103 103 self._revlog = repo.changelog
104 104
105 105 def __nonzero__(self):
106 106 """return True if any revision to navigate over"""
107 107 return self._first() is not None
108 108
109 109 __bool__ = __nonzero__
110 110
111 111 def _first(self):
112 112 """return the minimum non-filtered changeset or None"""
113 113 try:
114 114 return next(iter(self._revlog))
115 115 except StopIteration:
116 116 return None
117 117
118 118 def hex(self, rev):
119 119 return hex(self._revlog.node(rev))
120 120
121 121 def gen(self, pos, pagelen, limit):
122 122 """computes label and revision id for navigation link
123 123
124 124 :pos: is the revision relative to which we generate navigation.
125 125 :pagelen: the size of each navigation page
126 126 :limit: how far shall we link
127 127
128 128 The return is:
129 129 - a single element mappinglist
130 130 - containing a dictionary with a `before` and `after` key
131 131 - values are dictionaries with `label` and `node` keys
132 132 """
133 133 if not self:
134 134 # empty repo
135 135 return templateutil.mappinglist([
136 136 {'before': templateutil.mappinglist([]),
137 137 'after': templateutil.mappinglist([])},
138 138 ])
139 139
140 140 targets = []
141 141 for f in _navseq(1, pagelen):
142 142 if f > limit:
143 143 break
144 144 targets.append(pos + f)
145 145 targets.append(pos - f)
146 146 targets.sort()
147 147
148 148 first = self._first()
149 149 navbefore = [{'label': '(%i)' % first, 'node': self.hex(first)}]
150 150 navafter = []
151 151 for rev in targets:
152 152 if rev not in self._revlog:
153 153 continue
154 154 if pos < rev < limit:
155 155 navafter.append({'label': '+%d' % abs(rev - pos),
156 156 'node': self.hex(rev)})
157 157 if 0 < rev < pos:
158 158 navbefore.append({'label': '-%d' % abs(rev - pos),
159 159 'node': self.hex(rev)})
160 160
161 161 navafter.append({'label': 'tip', 'node': 'tip'})
162 162
163 163 # TODO: maybe this can be a scalar object supporting tomap()
164 164 return templateutil.mappinglist([
165 165 {'before': templateutil.mappinglist(navbefore),
166 166 'after': templateutil.mappinglist(navafter)},
167 167 ])
168 168
169 169 class filerevnav(revnav):
170 170
171 171 def __init__(self, repo, path):
172 172 """Navigation generation object
173 173
174 174 :repo: repo object we generate nav for
175 175 :path: path of the file we generate nav for
176 176 """
177 177 # used for iteration
178 178 self._changelog = repo.unfiltered().changelog
179 179 # used for hex generation
180 180 self._revlog = repo.file(path)
181 181
182 182 def hex(self, rev):
183 183 return hex(self._changelog.node(self._revlog.linkrev(rev)))
184 184
185 185 # TODO: maybe this can be a wrapper class for changectx/filectx list, which
186 186 # yields {'ctx': ctx}
187 187 def _ctxsgen(context, ctxs):
188 188 for s in ctxs:
189 189 d = {
190 190 'node': s.hex(),
191 191 'rev': s.rev(),
192 192 'user': s.user(),
193 193 'date': s.date(),
194 194 'description': s.description(),
195 195 'branch': s.branch(),
196 196 }
197 197 if util.safehasattr(s, 'path'):
198 198 d['file'] = s.path()
199 199 yield d
200 200
201 201 def _siblings(siblings=None, hiderev=None):
202 202 if siblings is None:
203 203 siblings = []
204 204 siblings = [s for s in siblings if s.node() != nullid]
205 205 if len(siblings) == 1 and siblings[0].rev() == hiderev:
206 206 siblings = []
207 207 return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
208 208
209 209 def difffeatureopts(req, ui, section):
210 210 diffopts = diffutil.difffeatureopts(ui, untrusted=True,
211 211 section=section, whitespace=True)
212 212
213 213 for k in ('ignorews', 'ignorewsamount', 'ignorewseol', 'ignoreblanklines'):
214 214 v = req.qsparams.get(k)
215 215 if v is not None:
216 216 v = stringutil.parsebool(v)
217 217 setattr(diffopts, k, v if v is not None else True)
218 218
219 219 return diffopts
220 220
221 221 def annotate(req, fctx, ui):
222 222 diffopts = difffeatureopts(req, ui, 'annotate')
223 223 return fctx.annotate(follow=True, diffopts=diffopts)
224 224
225 225 def parents(ctx, hide=None):
226 226 if isinstance(ctx, context.basefilectx):
227 227 introrev = ctx.introrev()
228 228 if ctx.changectx().rev() != introrev:
229 229 return _siblings([ctx.repo()[introrev]], hide)
230 230 return _siblings(ctx.parents(), hide)
231 231
232 232 def children(ctx, hide=None):
233 233 return _siblings(ctx.children(), hide)
234 234
235 235 def renamelink(fctx):
236 236 r = fctx.renamed()
237 237 if r:
238 238 return templateutil.mappinglist([{'file': r[0], 'node': hex(r[1])}])
239 239 return templateutil.mappinglist([])
240 240
241 241 def nodetagsdict(repo, node):
242 242 return templateutil.hybridlist(repo.nodetags(node), name='name')
243 243
244 244 def nodebookmarksdict(repo, node):
245 245 return templateutil.hybridlist(repo.nodebookmarks(node), name='name')
246 246
247 247 def nodebranchdict(repo, ctx):
248 248 branches = []
249 249 branch = ctx.branch()
250 250 # If this is an empty repo, ctx.node() == nullid,
251 251 # ctx.branch() == 'default'.
252 252 try:
253 253 branchnode = repo.branchtip(branch)
254 254 except error.RepoLookupError:
255 255 branchnode = None
256 256 if branchnode == ctx.node():
257 257 branches.append(branch)
258 258 return templateutil.hybridlist(branches, name='name')
259 259
260 260 def nodeinbranch(repo, ctx):
261 261 branches = []
262 262 branch = ctx.branch()
263 263 try:
264 264 branchnode = repo.branchtip(branch)
265 265 except error.RepoLookupError:
266 266 branchnode = None
267 267 if branch != 'default' and branchnode != ctx.node():
268 268 branches.append(branch)
269 269 return templateutil.hybridlist(branches, name='name')
270 270
271 271 def nodebranchnodefault(ctx):
272 272 branches = []
273 273 branch = ctx.branch()
274 274 if branch != 'default':
275 275 branches.append(branch)
276 276 return templateutil.hybridlist(branches, name='name')
277 277
278 278 def _nodenamesgen(context, f, node, name):
279 279 for t in f(node):
280 280 yield {name: t}
281 281
282 282 def showtag(repo, t1, node=nullid):
283 283 args = (repo.nodetags, node, 'tag')
284 284 return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
285 285
286 286 def showbookmark(repo, t1, node=nullid):
287 287 args = (repo.nodebookmarks, node, 'bookmark')
288 288 return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
289 289
290 290 def branchentries(repo, stripecount, limit=0):
291 291 tips = []
292 292 heads = repo.heads()
293 293 parity = paritygen(stripecount)
294 294 sortkey = lambda item: (not item[1], item[0].rev())
295 295
296 296 def entries(context):
297 297 count = 0
298 298 if not tips:
299 299 for tag, hs, tip, closed in repo.branchmap().iterbranches():
300 300 tips.append((repo[tip], closed))
301 301 for ctx, closed in sorted(tips, key=sortkey, reverse=True):
302 302 if limit > 0 and count >= limit:
303 303 return
304 304 count += 1
305 305 if closed:
306 306 status = 'closed'
307 307 elif ctx.node() not in heads:
308 308 status = 'inactive'
309 309 else:
310 310 status = 'open'
311 311 yield {
312 312 'parity': next(parity),
313 313 'branch': ctx.branch(),
314 314 'status': status,
315 315 'node': ctx.hex(),
316 316 'date': ctx.date()
317 317 }
318 318
319 319 return templateutil.mappinggenerator(entries)
320 320
321 321 def cleanpath(repo, path):
322 322 path = path.lstrip('/')
323 323 return pathutil.canonpath(repo.root, '', path)
324 324
325 325 def changectx(repo, req):
326 326 changeid = "tip"
327 327 if 'node' in req.qsparams:
328 328 changeid = req.qsparams['node']
329 329 ipos = changeid.find(':')
330 330 if ipos != -1:
331 331 changeid = changeid[(ipos + 1):]
332 332
333 333 return scmutil.revsymbol(repo, changeid)
334 334
335 335 def basechangectx(repo, req):
336 336 if 'node' in req.qsparams:
337 337 changeid = req.qsparams['node']
338 338 ipos = changeid.find(':')
339 339 if ipos != -1:
340 340 changeid = changeid[:ipos]
341 341 return scmutil.revsymbol(repo, changeid)
342 342
343 343 return None
344 344
345 345 def filectx(repo, req):
346 346 if 'file' not in req.qsparams:
347 347 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
348 348 path = cleanpath(repo, req.qsparams['file'])
349 349 if 'node' in req.qsparams:
350 350 changeid = req.qsparams['node']
351 351 elif 'filenode' in req.qsparams:
352 352 changeid = req.qsparams['filenode']
353 353 else:
354 354 raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
355 355 try:
356 356 fctx = scmutil.revsymbol(repo, changeid)[path]
357 357 except error.RepoError:
358 358 fctx = repo.filectx(path, fileid=changeid)
359 359
360 360 return fctx
361 361
362 362 def linerange(req):
363 363 linerange = req.qsparams.getall('linerange')
364 364 if not linerange:
365 365 return None
366 366 if len(linerange) > 1:
367 367 raise ErrorResponse(HTTP_BAD_REQUEST,
368 368 'redundant linerange parameter')
369 369 try:
370 370 fromline, toline = map(int, linerange[0].split(':', 1))
371 371 except ValueError:
372 372 raise ErrorResponse(HTTP_BAD_REQUEST,
373 373 'invalid linerange parameter')
374 374 try:
375 375 return util.processlinerange(fromline, toline)
376 376 except error.ParseError as exc:
377 377 raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc))
378 378
379 379 def formatlinerange(fromline, toline):
380 380 return '%d:%d' % (fromline + 1, toline)
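# Example (a sketch): a '?linerange=5:10' query parameter is parsed by
# linerange() into the 0-based half-open range (4, 10) via
# util.processlinerange, and formatlinerange(4, 10) renders it back as
# '5:10'.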
381 381
382 382 def _succsandmarkersgen(context, mapping):
383 383 repo = context.resource(mapping, 'repo')
384 384 itemmappings = templatekw.showsuccsandmarkers(context, mapping)
385 385 for item in itemmappings.tovalue(context, mapping):
386 386 item['successors'] = _siblings(repo[successor]
387 387 for successor in item['successors'])
388 388 yield item
389 389
390 390 def succsandmarkers(context, mapping):
391 391 return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,))
392 392
393 393 # teach the templater that succsandmarkers has switched to the (context, mapping) API
394 394 succsandmarkers._requires = {'repo', 'ctx'}
395 395
396 396 def _whyunstablegen(context, mapping):
397 397 repo = context.resource(mapping, 'repo')
398 398 ctx = context.resource(mapping, 'ctx')
399 399
400 400 entries = obsutil.whyunstable(repo, ctx)
401 401 for entry in entries:
402 402 if entry.get('divergentnodes'):
403 403 entry['divergentnodes'] = _siblings(entry['divergentnodes'])
404 404 yield entry
405 405
406 406 def whyunstable(context, mapping):
407 407 return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,))
408 408
409 409 whyunstable._requires = {'repo', 'ctx'}
410 410
411 411 def commonentry(repo, ctx):
412 412 node = ctx.node()
413 413 return {
414 414 # TODO: perhaps ctx.changectx() should be assigned if ctx is a
415 415         # filectx, but I'm not quite sure that would always work because
416 416 # fctx.parents() != fctx.changectx.parents() for example.
417 417 'ctx': ctx,
418 418 'rev': ctx.rev(),
419 419 'node': hex(node),
420 420 'author': ctx.user(),
421 421 'desc': ctx.description(),
422 422 'date': ctx.date(),
423 423 'extra': ctx.extra(),
424 424 'phase': ctx.phasestr(),
425 425 'obsolete': ctx.obsolete(),
426 426 'succsandmarkers': succsandmarkers,
427 427 'instabilities': templateutil.hybridlist(ctx.instabilities(),
428 428 name='instability'),
429 429 'whyunstable': whyunstable,
430 430 'branch': nodebranchnodefault(ctx),
431 431 'inbranch': nodeinbranch(repo, ctx),
432 432 'branches': nodebranchdict(repo, ctx),
433 433 'tags': nodetagsdict(repo, node),
434 434 'bookmarks': nodebookmarksdict(repo, node),
435 435 'parent': lambda **x: parents(ctx),
436 436 'child': lambda **x: children(ctx),
437 437 }
438 438
439 439 def changelistentry(web, ctx):
440 440 '''Obtain a dictionary to be used for entries in a changelist.
441 441
442 442 This function is called when producing items for the "entries" list passed
443 443 to the "shortlog" and "changelog" templates.
444 444 '''
445 445 repo = web.repo
446 446 rev = ctx.rev()
447 447 n = ctx.node()
448 448 showtags = showtag(repo, 'changelogtag', n)
449 449 files = listfilediffs(ctx.files(), n, web.maxfiles)
450 450
451 451 entry = commonentry(repo, ctx)
452 452 entry.update(
453 453 allparents=lambda **x: parents(ctx),
454 454 parent=lambda **x: parents(ctx, rev - 1),
455 455 child=lambda **x: children(ctx, rev + 1),
456 456 changelogtag=showtags,
457 457 files=files,
458 458 )
459 459 return entry
460 460
461 461 def changelistentries(web, revs, maxcount, parityfn):
462 462 """Emit up to N records for an iterable of revisions."""
463 463 repo = web.repo
464 464
465 465 count = 0
466 466 for rev in revs:
467 467 if count >= maxcount:
468 468 break
469 469
470 470 count += 1
471 471
472 472 entry = changelistentry(web, repo[rev])
473 473 entry['parity'] = next(parityfn)
474 474
475 475 yield entry
476 476
477 477 def symrevorshortnode(req, ctx):
478 478 if 'node' in req.qsparams:
479 479 return templatefilters.revescape(req.qsparams['node'])
480 480 else:
481 481 return short(ctx.node())
482 482
483 483 def _listfilesgen(context, ctx, stripecount):
484 484 parity = paritygen(stripecount)
485 485 for blockno, f in enumerate(ctx.files()):
486 486 template = 'filenodelink' if f in ctx else 'filenolink'
487 487 yield context.process(template, {
488 488 'node': ctx.hex(),
489 489 'file': f,
490 490 'blockno': blockno + 1,
491 491 'parity': next(parity),
492 492 })
493 493
494 494 def changesetentry(web, ctx):
495 495 '''Obtain a dictionary to be used to render the "changeset" template.'''
496 496
497 497 showtags = showtag(web.repo, 'changesettag', ctx.node())
498 498 showbookmarks = showbookmark(web.repo, 'changesetbookmark', ctx.node())
499 499 showbranch = nodebranchnodefault(ctx)
500 500
501 501 basectx = basechangectx(web.repo, web.req)
502 502 if basectx is None:
503 503 basectx = ctx.p1()
504 504
505 505 style = web.config('web', 'style')
506 506 if 'style' in web.req.qsparams:
507 507 style = web.req.qsparams['style']
508 508
509 509 diff = diffs(web, ctx, basectx, None, style)
510 510
511 511 parity = paritygen(web.stripecount)
512 512 diffstatsgen = diffstatgen(web.repo.ui, ctx, basectx)
513 513 diffstats = diffstat(ctx, diffstatsgen, parity)
514 514
515 515 return dict(
516 516 diff=diff,
517 517 symrev=symrevorshortnode(web.req, ctx),
518 518 basenode=basectx.hex(),
519 519 changesettag=showtags,
520 520 changesetbookmark=showbookmarks,
521 521 changesetbranch=showbranch,
522 522 files=templateutil.mappedgenerator(_listfilesgen,
523 523 args=(ctx, web.stripecount)),
524 524 diffsummary=lambda **x: diffsummary(diffstatsgen),
525 525 diffstat=diffstats,
526 526 archives=web.archivelist(ctx.hex()),
527 527 **pycompat.strkwargs(commonentry(web.repo, ctx)))
528 528
529 529 def _listfilediffsgen(context, files, node, max):
530 530 for f in files[:max]:
531 531 yield context.process('filedifflink', {'node': hex(node), 'file': f})
532 532 if len(files) > max:
533 533 yield context.process('fileellipses', {})
534 534
535 535 def listfilediffs(files, node, max):
536 536 return templateutil.mappedgenerator(_listfilediffsgen,
537 537 args=(files, node, max))
538 538
539 539 def _prettyprintdifflines(context, lines, blockno, lineidprefix):
540 540 for lineno, l in enumerate(lines, 1):
541 541 difflineno = "%d.%d" % (blockno, lineno)
542 542 if l.startswith('+'):
543 543 ltype = "difflineplus"
544 544 elif l.startswith('-'):
545 545 ltype = "difflineminus"
546 546 elif l.startswith('@'):
547 547 ltype = "difflineat"
548 548 else:
549 549 ltype = "diffline"
550 550 yield context.process(ltype, {
551 551 'line': l,
552 552 'lineno': lineno,
553 553 'lineid': lineidprefix + "l%s" % difflineno,
554 554 'linenumber': "% 8s" % difflineno,
555 555 })
556 556
557 557 def _diffsgen(context, repo, ctx, basectx, files, style, stripecount,
558 558 linerange, lineidprefix):
559 559 if files:
560 560 m = match.exact(repo.root, repo.getcwd(), files)
561 561 else:
562 562 m = match.always(repo.root, repo.getcwd())
563 563
564 564 diffopts = patch.diffopts(repo.ui, untrusted=True)
565 565 node1 = basectx.node()
566 566 node2 = ctx.node()
567 567 parity = paritygen(stripecount)
568 568
569 569 diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts)
570 570 for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
571 571 if style != 'raw':
572 572 header = header[1:]
573 573 lines = [h + '\n' for h in header]
574 574 for hunkrange, hunklines in hunks:
575 575 if linerange is not None and hunkrange is not None:
576 576 s1, l1, s2, l2 = hunkrange
577 577 if not mdiff.hunkinrange((s2, l2), linerange):
578 578 continue
579 579 lines.extend(hunklines)
580 580 if lines:
581 581 l = templateutil.mappedgenerator(_prettyprintdifflines,
582 582 args=(lines, blockno,
583 583 lineidprefix))
584 584 yield {
585 585 'parity': next(parity),
586 586 'blockno': blockno,
587 587 'lines': l,
588 588 }
589 589
590 590 def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=''):
591 591 args = (web.repo, ctx, basectx, files, style, web.stripecount,
592 592 linerange, lineidprefix)
593 593 return templateutil.mappinggenerator(_diffsgen, args=args, name='diffblock')
594 594
595 595 def _compline(type, leftlineno, leftline, rightlineno, rightline):
596 596 lineid = leftlineno and ("l%d" % leftlineno) or ''
597 597 lineid += rightlineno and ("r%d" % rightlineno) or ''
598 598 llno = '%d' % leftlineno if leftlineno else ''
599 599 rlno = '%d' % rightlineno if rightlineno else ''
600 600 return {
601 601 'type': type,
602 602 'lineid': lineid,
603 603 'leftlineno': leftlineno,
604 604 'leftlinenumber': "% 6s" % llno,
605 605 'leftline': leftline or '',
606 606 'rightlineno': rightlineno,
607 607 'rightlinenumber': "% 6s" % rlno,
608 608 'rightline': rightline or '',
609 609 }
610 610
611 611 def _getcompblockgen(context, leftlines, rightlines, opcodes):
612 612 for type, llo, lhi, rlo, rhi in opcodes:
613 613 len1 = lhi - llo
614 614 len2 = rhi - rlo
615 615 count = min(len1, len2)
616 616 for i in xrange(count):
617 617 yield _compline(type=type,
618 618 leftlineno=llo + i + 1,
619 619 leftline=leftlines[llo + i],
620 620 rightlineno=rlo + i + 1,
621 621 rightline=rightlines[rlo + i])
622 622 if len1 > len2:
623 623 for i in xrange(llo + count, lhi):
624 624 yield _compline(type=type,
625 625 leftlineno=i + 1,
626 626 leftline=leftlines[i],
627 627 rightlineno=None,
628 628 rightline=None)
629 629 elif len2 > len1:
630 630 for i in xrange(rlo + count, rhi):
631 631 yield _compline(type=type,
632 632 leftlineno=None,
633 633 leftline=None,
634 634 rightlineno=i + 1,
635 635 rightline=rightlines[i])
636 636
637 637 def _getcompblock(leftlines, rightlines, opcodes):
638 638 args = (leftlines, rightlines, opcodes)
639 639 return templateutil.mappinggenerator(_getcompblockgen, args=args,
640 640 name='comparisonline')
641 641
642 642 def _comparegen(context, contextnum, leftlines, rightlines):
643 643 '''Generator function that provides side-by-side comparison data.'''
644 644 s = difflib.SequenceMatcher(None, leftlines, rightlines)
645 645 if contextnum < 0:
646 646 l = _getcompblock(leftlines, rightlines, s.get_opcodes())
647 647 yield {'lines': l}
648 648 else:
649 649 for oc in s.get_grouped_opcodes(n=contextnum):
650 650 l = _getcompblock(leftlines, rightlines, oc)
651 651 yield {'lines': l}
652 652
653 653 def compare(contextnum, leftlines, rightlines):
654 654 args = (contextnum, leftlines, rightlines)
655 655 return templateutil.mappinggenerator(_comparegen, args=args,
656 656 name='comparisonblock')
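# Background on the stdlib API used above (a sketch): difflib.SequenceMatcher
# produces (tag, i1, i2, j1, j2) opcodes such as ('equal', 0, 2, 0, 2) or
# ('replace', 2, 3, 2, 3), and get_grouped_opcodes(n) yields the same opcodes
# grouped around changes with n lines of context, which is what a
# non-negative contextnum selects in _comparegen above.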
657 657
658 658 def diffstatgen(ui, ctx, basectx):
659 659 '''Generator function that provides the diffstat data.'''
660 660
661 661 diffopts = patch.diffopts(ui, {'noprefix': False})
662 662 stats = patch.diffstatdata(
663 663 util.iterlines(ctx.diff(basectx, opts=diffopts)))
664 664 maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
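    # Yield the same computed tuple forever so that independent consumers
    # (diffsummary and diffstat below) can each grab a copy by calling
    # next() on one shared generator.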
665 665 while True:
666 666 yield stats, maxname, maxtotal, addtotal, removetotal, binary
667 667
668 668 def diffsummary(statgen):
669 669 '''Return a short summary of the diff.'''
670 670
671 671 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
672 672 return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
673 673 len(stats), addtotal, removetotal)
674 674
675 675 def _diffstattmplgen(context, ctx, statgen, parity):
676 676 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
677 677 files = ctx.files()
678 678
679 679 def pct(i):
680 680 if maxtotal == 0:
681 681 return 0
682 682 return (float(i) / maxtotal) * 100
683 683
684 684 fileno = 0
685 685 for filename, adds, removes, isbinary in stats:
686 686 template = 'diffstatlink' if filename in files else 'diffstatnolink'
687 687 total = adds + removes
688 688 fileno += 1
689 689 yield context.process(template, {
690 690 'node': ctx.hex(),
691 691 'file': filename,
692 692 'fileno': fileno,
693 693 'total': total,
694 694 'addpct': pct(adds),
695 695 'removepct': pct(removes),
696 696 'parity': next(parity),
697 697 })
698 698
699 699 def diffstat(ctx, statgen, parity):
700 700 '''Return a diffstat template for each file in the diff.'''
701 701 args = (ctx, statgen, parity)
702 702 return templateutil.mappedgenerator(_diffstattmplgen, args=args)
703 703
704 704 class sessionvars(templateutil.wrapped):
705 705 def __init__(self, vars, start='?'):
706 706 self._start = start
707 707 self._vars = vars
708 708
709 709 def __getitem__(self, key):
710 710 return self._vars[key]
711 711
712 712 def __setitem__(self, key, value):
713 713 self._vars[key] = value
714 714
715 715 def __copy__(self):
716 716 return sessionvars(copy.copy(self._vars), self._start)
717 717
718 718 def contains(self, context, mapping, item):
719 719 item = templateutil.unwrapvalue(context, mapping, item)
720 720 return item in self._vars
721 721
722 722 def getmember(self, context, mapping, key):
723 723 key = templateutil.unwrapvalue(context, mapping, key)
724 724 return self._vars.get(key)
725 725
726 726 def getmin(self, context, mapping):
727 727 raise error.ParseError(_('not comparable'))
728 728
729 729 def getmax(self, context, mapping):
730 730 raise error.ParseError(_('not comparable'))
731 731
732 732 def filter(self, context, mapping, select):
733 733 # implement if necessary
734 734 raise error.ParseError(_('not filterable'))
735 735
736 736 def itermaps(self, context):
737 737 separator = self._start
738 738 for key, value in sorted(self._vars.iteritems()):
739 739 yield {'name': key,
740 740 'value': pycompat.bytestr(value),
741 741 'separator': separator,
742 742 }
743 743 separator = '&'
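    # Example (illustrative): for vars {'rev': '42', 'style': 'gitweb'},
    # itermaps() produces the mapping for '?rev=42' and then for
    # '&style=gitweb', so a template like
    # '{separator}{name}={value|urlescape}' renders '?rev=42&style=gitweb'.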
744 744
745 745 def join(self, context, mapping, sep):
746 746 # could be '{separator}{name}={value|urlescape}'
747 747 raise error.ParseError(_('not displayable without template'))
748 748
749 749 def show(self, context, mapping):
750 750         return self.join(context, mapping, '')
751 751
752 752 def tobool(self, context, mapping):
753 753 return bool(self._vars)
754 754
755 755 def tovalue(self, context, mapping):
756 756 return self._vars
757 757
758 758 class wsgiui(uimod.ui):
759 759 # default termwidth breaks under mod_wsgi
760 760 def termwidth(self):
761 761 return 80
762 762
763 763 def getwebsubs(repo):
764 764 websubtable = []
765 765 websubdefs = repo.ui.configitems('websub')
766 766 # we must maintain interhg backwards compatibility
767 767 websubdefs += repo.ui.configitems('interhg')
768 768 for key, pattern in websubdefs:
769 769 # grab the delimiter from the character after the "s"
770 770 unesc = pattern[1:2]
771 771 delim = stringutil.reescape(unesc)
772 772
773 773 # identify portions of the pattern, taking care to avoid escaped
774 774 # delimiters. the replace format and flags are optional, but
775 775 # delimiters are required.
776 776 match = re.match(
777 777 br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
778 778 % (delim, delim, delim), pattern)
779 779 if not match:
780 780 repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
781 781 % (key, pattern))
782 782 continue
783 783
784 784 # we need to unescape the delimiter for regexp and format
785 785 delim_re = re.compile(br'(?<!\\)\\%s' % delim)
786 786 regexp = delim_re.sub(unesc, match.group(1))
787 787 format = delim_re.sub(unesc, match.group(2))
788 788
789 789 # the pattern allows for 6 regexp flags, so set them if necessary
790 790 flagin = match.group(3)
791 791 flags = 0
792 792 if flagin:
793 793 for flag in flagin.upper():
794 794 flags |= re.__dict__[flag]
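    # Example (illustrative): the websub entry
    #   issues = s|issue(\d+)|<a href="https://bts.example.org/issue\1">issue \1</a>|i
    # splits into the regexp 'issue(\d+)', the HTML replacement text, and
    # the re.I flag.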
795 795
796 796 try:
797 797 regexp = re.compile(regexp, flags)
798 798 websubtable.append((regexp, format))
799 799 except re.error:
800 800 repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
801 801 % (key, regexp))
802 802 return websubtable
803 803
804 804 def getgraphnode(repo, ctx):
805 805 return (templatekw.getgraphnodecurrent(repo, ctx) +
806 806 templatekw.getgraphnodesymbol(ctx))
diff --git a/mercurial/obsutil.py b/mercurial/obsutil.py
@@ -1,980 +1,980 @@
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 diffutil,
14 15 node as nodemod,
15 16 phases,
16 17 util,
17 18 )
18 19 from .utils import (
19 20 dateutil,
20 diffutil,
21 21 )
22 22
23 23 ### obsolescence marker flag
24 24
25 25 ## bumpedfix flag
26 26 #
27 27 # When a changeset A' succeeds a changeset A which became public, we call A'
28 28 # "bumped" because it's a successor of a public changeset
29 29 #
30 30 # o A' (bumped)
31 31 # |`:
32 32 # | o A
33 33 # |/
34 34 # o Z
35 35 #
36 36 # The way to solve this situation is to create a new changeset Ad as a child
37 37 # of A. This changeset has the same content as A'. So the diff from A to A'
38 38 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
39 39 #
40 40 # o Ad
41 41 # |`:
42 42 # | x A'
43 43 # |'|
44 44 # o | A
45 45 # |/
46 46 # o Z
47 47 #
48 48 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
49 49 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 50 # This flag means that the successors express the changes between the public and
51 51 # bumped version and fix the situation, breaking the transitivity of
52 52 # "bumped" here.
53 53 bumpedfix = 1
54 54 usingsha256 = 2
55 55
56 56 class marker(object):
57 57 """Wrap obsolete marker raw data"""
58 58
59 59 def __init__(self, repo, data):
60 60 # the repo argument will be used to create changectx in later version
61 61 self._repo = repo
62 62 self._data = data
63 63 self._decodedmeta = None
64 64
65 65 def __hash__(self):
66 66 return hash(self._data)
67 67
68 68 def __eq__(self, other):
69 69 if type(other) != type(self):
70 70 return False
71 71 return self._data == other._data
72 72
73 73 def prednode(self):
74 74 """Predecessor changeset node identifier"""
75 75 return self._data[0]
76 76
77 77 def succnodes(self):
78 78 """List of successor changesets node identifiers"""
79 79 return self._data[1]
80 80
81 81 def parentnodes(self):
82 82 """Parents of the predecessors (None if not recorded)"""
83 83 return self._data[5]
84 84
85 85 def metadata(self):
86 86 """Decoded metadata dictionary"""
87 87 return dict(self._data[3])
88 88
89 89 def date(self):
90 90 """Creation date as (unixtime, offset)"""
91 91 return self._data[4]
92 92
93 93 def flags(self):
94 94 """The flags field of the marker"""
95 95 return self._data[2]
96 96
97 97 def getmarkers(repo, nodes=None, exclusive=False):
98 98 """returns markers known in a repository
99 99
100 100     If <nodes> is specified, only markers "relevant" to those nodes are
101 101 returned"""
102 102 if nodes is None:
103 103 rawmarkers = repo.obsstore
104 104 elif exclusive:
105 105 rawmarkers = exclusivemarkers(repo, nodes)
106 106 else:
107 107 rawmarkers = repo.obsstore.relevantmarkers(nodes)
108 108
109 109 for markerdata in rawmarkers:
110 110 yield marker(repo, markerdata)
111 111
112 112 def closestpredecessors(repo, nodeid):
113 113 """yield the list of next predecessors pointing on visible changectx nodes
114 114
115 115     This function respects the repoview filtering; filtered revisions will be
116 116 considered missing.
117 117 """
118 118
119 119 precursors = repo.obsstore.predecessors
120 120 stack = [nodeid]
121 121 seen = set(stack)
122 122
123 123 while stack:
124 124 current = stack.pop()
125 125 currentpreccs = precursors.get(current, ())
126 126
127 127 for prec in currentpreccs:
128 128 precnodeid = prec[0]
129 129
130 130 # Basic cycle protection
131 131 if precnodeid in seen:
132 132 continue
133 133 seen.add(precnodeid)
134 134
135 135 if precnodeid in repo:
136 136 yield precnodeid
137 137 else:
138 138 stack.append(precnodeid)
139 139
140 140 def allpredecessors(obsstore, nodes, ignoreflags=0):
141 141 """Yield node for every precursors of <nodes>.
142 142
143 143 Some precursors may be unknown locally.
144 144
145 145 This is a linear yield unsuited to detecting folded changesets. It includes
146 146 initial nodes too."""
147 147
148 148 remaining = set(nodes)
149 149 seen = set(remaining)
150 150 while remaining:
151 151 current = remaining.pop()
152 152 yield current
153 153 for mark in obsstore.predecessors.get(current, ()):
154 154 # ignore marker flagged with specified flag
155 155 if mark[2] & ignoreflags:
156 156 continue
157 157 suc = mark[0]
158 158 if suc not in seen:
159 159 seen.add(suc)
160 160 remaining.add(suc)
161 161
162 162 def allsuccessors(obsstore, nodes, ignoreflags=0):
163 163 """Yield node for every successor of <nodes>.
164 164
165 165 Some successors may be unknown locally.
166 166
167 167 This is a linear yield unsuited to detecting split changesets. It includes
168 168 initial nodes too."""
169 169 remaining = set(nodes)
170 170 seen = set(remaining)
171 171 while remaining:
172 172 current = remaining.pop()
173 173 yield current
174 174 for mark in obsstore.successors.get(current, ()):
175 175             # ignore markers flagged with the specified flag
176 176 if mark[2] & ignoreflags:
177 177 continue
178 178 for suc in mark[1]:
179 179 if suc not in seen:
180 180 seen.add(suc)
181 181 remaining.add(suc)
182 182
183 183 def _filterprunes(markers):
184 184 """return a set with no prune markers"""
185 185 return set(m for m in markers if m[1])
186 186
187 187 def exclusivemarkers(repo, nodes):
188 188 """set of markers relevant to "nodes" but no other locally-known nodes
189 189
190 190     This function computes the set of markers "exclusive" to a locally-known
191 191 node. This means we walk the markers starting from <nodes> until we reach a
192 192     locally-known precursor outside of <nodes>. Elements of <nodes> with
193 193 locally-known successors outside of <nodes> are ignored (since their
194 194 precursors markers are also relevant to these successors).
195 195
196 196 For example:
197 197
198 198 # (A0 rewritten as A1)
199 199 #
200 200 # A0 <-1- A1 # Marker "1" is exclusive to A1
201 201
202 202 or
203 203
204 204     # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
205 205 #
206 206 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
207 207
208 208 or
209 209
210 210 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
211 211 #
212 212 # <-2- A1 # Marker "2" is exclusive to A0,A1
213 213 # /
214 214 # <-1- A0
215 215 # \
216 216 # <-3- A2 # Marker "3" is exclusive to A0,A2
217 217 #
218 218 # in addition:
219 219 #
220 220 # Markers "2,3" are exclusive to A1,A2
221 221 # Markers "1,2,3" are exclusive to A0,A1,A2
222 222
223 223 See test/test-obsolete-bundle-strip.t for more examples.
224 224
225 225 An example usage is strip. When stripping a changeset, we also want to
226 226 strip the markers exclusive to this changeset. Otherwise we would have
227 227 "dangling"" obsolescence markers from its precursors: Obsolescence markers
228 228 marking a node as obsolete without any successors available locally.
229 229
230 230 As for relevant markers, the prune markers for children will be followed.
231 231 Of course, they will only be followed if the pruned child is
232 232 locally-known, since the prune markers are relevant to the pruned node.
233 233 However, while prune markers are considered relevant to the parent of the
234 234 pruned changesets, prune markers for a locally-known changeset (with no
235 235 successors) are considered exclusive to the pruned nodes. This allows
236 236 stripping the prune markers (with the rest of the exclusive chain) alongside
237 237 the pruned changesets.
238 238 """
239 239 # running on a filtered repository would be dangerous as markers could be
240 240 # reported as exclusive when they are relevant for other filtered nodes.
241 241 unfi = repo.unfiltered()
242 242
243 243 # shortcuts to various useful items
244 244 nm = unfi.changelog.nodemap
245 245 precursorsmarkers = unfi.obsstore.predecessors
246 246 successormarkers = unfi.obsstore.successors
247 247 childrenmarkers = unfi.obsstore.children
248 248
249 249 # exclusive markers (return of the function)
250 250 exclmarkers = set()
251 251 # we need fast membership testing
252 252 nodes = set(nodes)
253 253 # looking for heads in the obshistory
254 254 #
255 255 # XXX we are ignoring all issues in regard to cycles for now.
256 256 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
257 257 stack.sort()
258 258 # nodes already stacked
259 259 seennodes = set(stack)
260 260 while stack:
261 261 current = stack.pop()
262 262 # fetch precursors markers
263 263 markers = list(precursorsmarkers.get(current, ()))
264 264 # extend the list with prune markers
265 265 for mark in successormarkers.get(current, ()):
266 266 if not mark[1]:
267 267 markers.append(mark)
268 268 # and markers from children (looking for prune)
269 269 for mark in childrenmarkers.get(current, ()):
270 270 if not mark[1]:
271 271 markers.append(mark)
272 272 # traverse the markers
273 273 for mark in markers:
274 274 if mark in exclmarkers:
275 275 # markers already selected
276 276 continue
277 277
278 278 # If the marker is about the current node, select it
279 279 #
280 280 # (this delays the addition of markers from children)
281 281 if mark[1] or mark[0] == current:
282 282 exclmarkers.add(mark)
283 283
284 284 # should we keep traversing through the precursors?
285 285 prec = mark[0]
286 286
287 287 # nodes in the stack or already processed
288 288 if prec in seennodes:
289 289 continue
290 290
291 291 # is this a locally known node ?
292 292 known = prec in nm
293 293 # if locally-known and not in the <nodes> set, the traversal
294 294 # stops here.
295 295 if known and prec not in nodes:
296 296 continue
297 297
298 298 # do not keep going if there are unselected markers pointing to this
299 299 # node. If we end up traversing these unselected markers later the
300 300 # node will be taken care of at that point.
301 301 precmarkers = _filterprunes(successormarkers.get(prec))
302 302 if precmarkers.issubset(exclmarkers):
303 303 seennodes.add(prec)
304 304 stack.append(prec)
305 305
306 306 return exclmarkers
307 307
308 308 def foreground(repo, nodes):
309 309 """return all nodes in the "foreground" of other node
310 310
311 311 The foreground of a revision is anything reachable using parent -> children
312 312 or precursor -> successor relation. It is very similar to "descendant" but
313 313 augmented with obsolescence information.
314 314
315 315 Beware that obsolescence cycles may arise in complex situations.
316 316 """
317 317 repo = repo.unfiltered()
318 318 foreground = set(repo.set('%ln::', nodes))
319 319 if repo.obsstore:
320 320 # We only need this complicated logic if there is obsolescence
321 321 # XXX will probably deserve an optimised revset.
322 322 nm = repo.changelog.nodemap
323 323 plen = -1
324 324 # compute the whole set of successors or descendants
325 325 while len(foreground) != plen:
326 326 plen = len(foreground)
327 327 succs = set(c.node() for c in foreground)
328 328 mutable = [c.node() for c in foreground if c.mutable()]
329 329 succs.update(allsuccessors(repo.obsstore, mutable))
330 330 known = (n for n in succs if n in nm)
331 331 foreground = set(repo.set('%ln::', known))
332 332 return set(c.node() for c in foreground)
333 333
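The while loop above is a straightforward fixed-point computation: keep unioning in descendants and successors until the set stops growing. A self-contained sketch of the same idea over plain dictionaries (the `children` and `successors` maps here are illustrative assumptions, not Mercurial API):

    def toyforeground(children, successors, nodes):
        fg = set(nodes)
        plen = -1
        while len(fg) != plen:  # iterate until a fixed point is reached
            plen = len(fg)
            for n in list(fg):
                fg.update(children.get(n, ()))
                fg.update(successors.get(n, ()))
        return fg

    children = {'A': ['B']}    # B is a child of A
    successors = {'B': ['C']}  # B was rewritten as C
    print(toyforeground(children, successors, ['A']))  # {'A', 'B', 'C'}
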
334 334 # effectflag field
335 335 #
336 336 # Effect-flag is a 1-byte bit field used to store what changed between a
337 337 # changeset and its successor(s).
338 338 #
339 339 # The effect flag is stored in obs-markers metadata while we iterate on the
340 340 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
341 341 # with an incompatible design for effect flag, we can store a new design under
342 342 # another field name so we don't break readers. We plan to extend the existing
343 343 # obsmarkers bit-field when the effect flag design is stabilized.
344 344 #
345 345 # The effect-flag is placed behind an experimental flag
346 346 # `effect-flags` set to off by default.
347 347 #
348 348
349 349 EFFECTFLAGFIELD = "ef1"
350 350
351 351 DESCCHANGED = 1 << 0 # action changed the description
352 352 METACHANGED = 1 << 1 # action changed the meta
353 353 DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset
354 354 PARENTCHANGED = 1 << 2 # action changed the parent
355 355 USERCHANGED = 1 << 4 # the user changed
356 356 DATECHANGED = 1 << 5 # the date changed
357 357 BRANCHCHANGED = 1 << 6 # the branch changed
358 358
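Because the effect flag is a plain bit field, individual effects compose with bitwise OR and are tested with bitwise AND, for example:

    # a rewrite that edited both the description and the user
    effects = DESCCHANGED | USERCHANGED

    assert effects & USERCHANGED        # this bit is set
    assert not (effects & DATECHANGED)  # this bit is not set
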
359 359 METABLACKLIST = [
360 360 re.compile('^branch$'),
361 361 re.compile('^.*-source$'),
362 362 re.compile('^.*_source$'),
363 363 re.compile('^source$'),
364 364 ]
365 365
366 366 def metanotblacklisted(metaitem):
367 367 """ Check that the key of a meta item (extrakey, extravalue) does not
368 368 match any of the blacklist patterns
369 369 """
370 370 metakey = metaitem[0]
371 371
372 372 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
373 373
374 374 def _prepare_hunk(hunk):
375 375 """Drop all information but the username and patch"""
376 376 cleanhunk = []
377 377 for line in hunk.splitlines():
378 378 if line.startswith(b'# User') or not line.startswith(b'#'):
379 379 if line.startswith(b'@@'):
380 380 line = b'@@\n'
381 381 cleanhunk.append(line)
382 382 return cleanhunk
383 383
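For instance, given an export-style hunk, only the '# User' line and the non-comment lines survive, and '@@' ranges are collapsed so hunks compare equal regardless of line offsets (a quick illustration):

    hunk = b'# User alice\n# Date 0 0\n@@ -1,2 +1,2 @@\n-old\n+new\n'
    print(_prepare_hunk(hunk))
    # [b'# User alice', b'@@\n', b'-old', b'+new']
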
384 384 def _getdifflines(iterdiff):
385 385 """return a cleaned up lines"""
386 386 lines = next(iterdiff, None)
387 387
388 388 if lines is None:
389 389 return lines
390 390
391 391 return _prepare_hunk(lines)
392 392
393 393 def _cmpdiff(leftctx, rightctx):
394 394 """return True if both ctx introduce the "same diff"
395 395
396 396 This is a first and basic implementation, with many shortcomings.
397 397 """
398 398 diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
399 399 # leftctx or rightctx might be filtered, so we need to use the contexts
400 400 # with an unfiltered repository to safely compute the diff
401 401 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
402 402 leftdiff = leftunfi.diff(opts=diffopts)
403 403 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
404 404 rightdiff = rightunfi.diff(opts=diffopts)
405 405
406 406 left, right = (0, 0)
407 407 while None not in (left, right):
408 408 left = _getdifflines(leftdiff)
409 409 right = _getdifflines(rightdiff)
410 410
411 411 if left != right:
412 412 return False
413 413 return True
414 414
415 415 def geteffectflag(relation):
416 416 """ From an obs-marker relation, compute what changed between the
417 417 predecessor and the successor.
418 418 """
419 419 effects = 0
420 420
421 421 source = relation[0]
422 422
423 423 for changectx in relation[1]:
424 424 # Check if description has changed
425 425 if changectx.description() != source.description():
426 426 effects |= DESCCHANGED
427 427
428 428 # Check if user has changed
429 429 if changectx.user() != source.user():
430 430 effects |= USERCHANGED
431 431
432 432 # Check if date has changed
433 433 if changectx.date() != source.date():
434 434 effects |= DATECHANGED
435 435
436 436 # Check if branch has changed
437 437 if changectx.branch() != source.branch():
438 438 effects |= BRANCHCHANGED
439 439
440 440 # Check if at least one of the parents has changed
441 441 if changectx.parents() != source.parents():
442 442 effects |= PARENTCHANGED
443 443
444 444 # Check if other meta has changed
445 445 changeextra = changectx.extra().items()
446 446 ctxmeta = list(filter(metanotblacklisted, changeextra))
447 447
448 448 sourceextra = source.extra().items()
449 449 srcmeta = list(filter(metanotblacklisted, sourceextra))
450 450
451 451 if ctxmeta != srcmeta:
452 452 effects |= METACHANGED
453 453
454 454 # Check if the diff has changed
455 455 if not _cmpdiff(source, changectx):
456 456 effects |= DIFFCHANGED
457 457
458 458 return effects
459 459
460 460 def getobsoleted(repo, tr):
461 461 """return the set of pre-existing revisions obsoleted by a transaction"""
462 462 torev = repo.unfiltered().changelog.nodemap.get
463 463 phase = repo._phasecache.phase
464 464 succsmarkers = repo.obsstore.successors.get
465 465 public = phases.public
466 466 addedmarkers = tr.changes.get('obsmarkers')
467 467 addedrevs = tr.changes.get('revs')
468 468 seenrevs = set()
469 469 obsoleted = set()
470 470 for mark in addedmarkers:
471 471 node = mark[0]
472 472 rev = torev(node)
473 473 if rev is None or rev in seenrevs or rev in addedrevs:
474 474 continue
475 475 seenrevs.add(rev)
476 476 if phase(repo, rev) == public:
477 477 continue
478 478 if set(succsmarkers(node) or []).issubset(addedmarkers):
479 479 obsoleted.add(rev)
480 480 return obsoleted
481 481
482 482 class _succs(list):
483 483 """small class to represent a successors with some metadata about it"""
484 484
485 485 def __init__(self, *args, **kwargs):
486 486 super(_succs, self).__init__(*args, **kwargs)
487 487 self.markers = set()
488 488
489 489 def copy(self):
490 490 new = _succs(self)
491 491 new.markers = self.markers.copy()
492 492 return new
493 493
494 494 @util.propertycache
495 495 def _set(self):
496 496 # immutable
497 497 return set(self)
498 498
499 499 def canmerge(self, other):
500 500 return self._set.issubset(other._set)
501 501
502 502 def successorssets(repo, initialnode, closest=False, cache=None):
503 503 """Return set of all latest successors of initial nodes
504 504
505 505 The successors set of a changeset A are the group of revisions that succeed
506 506 A. It succeeds A as a consistent whole, each revision being only a partial
507 507 replacement. By default, the successors set contains non-obsolete
508 508 changesets only, walking the obsolescence graph until reaching a leaf. If
509 509 'closest' is set to True, closest successors-sets are returned (the
510 510 obsolescence walk stops on known changesets).
511 511
512 512 This function returns the full list of successor sets which is why it
513 513 returns a list of tuples and not just a single tuple. Each tuple is a valid
514 514 successors set. Note that (A,) may be a valid successors set for changeset A
515 515 (see below).
516 516
517 517 In most cases, a changeset A will have a single element (e.g. the changeset
518 518 A is replaced by A') in its successors set. Though, it is also common for a
519 519 changeset A to have no elements in its successor set (e.g. the changeset
520 520 has been pruned). Therefore, the returned list of successors sets will be
521 521 [(A',)] or [], respectively.
522 522
523 523 When a changeset A is split into A' and B', however, it will result in a
524 524 successors set containing more than a single element, i.e. [(A',B')].
525 525 Divergent changesets will result in multiple successors sets, i.e. [(A',),
526 526 (A'')].
527 527
528 528 If a changeset A is not obsolete, then it will conceptually have no
529 529 successors set. To distinguish this from a pruned changeset, the successor
530 530 set will contain itself only, i.e. [(A,)].
531 531
532 532 Finally, final successors unknown locally are considered to be pruned
533 533 (pruned: obsoleted without any successors). (Final: successors not affected
534 534 by markers).
535 535
536 536 The 'closest' mode respects the repoview filtering. For example, without a
537 537 filter it will stop at the first locally-known changeset; with the 'visible'
538 538 filter it will stop on visible changesets.
539 539
540 540 The optional `cache` parameter is a dictionary that may contain
541 541 precomputed successors sets. It is meant to reuse the computation of a
542 542 previous call to `successorssets` when multiple calls are made at the same
543 543 time. The cache dictionary is updated in place. The caller is responsible
544 544 for its life span. Code that makes multiple calls to `successorssets`
545 545 *should* use this cache mechanism or risk a performance hit.
546 546
547 547 Since results are different depending on the 'closest' mode, the same cache
548 548 cannot be reused for both modes.
549 549 """
550 550
551 551 succmarkers = repo.obsstore.successors
552 552
553 553 # Stack of nodes we search successors sets for
554 554 toproceed = [initialnode]
555 555 # set version of above list for fast loop detection
556 556 # element added to "toproceed" must be added here
557 557 stackedset = set(toproceed)
558 558 if cache is None:
559 559 cache = {}
560 560
561 561 # This while loop is the flattened version of a recursive search for
562 562 # successors sets
563 563 #
564 564 # def successorssets(x):
565 565 # successors = directsuccessors(x)
566 566 # ss = [[]]
567 567 # for succ in directsuccessors(x):
568 568 # # product as in itertools cartesian product
569 569 # ss = product(ss, successorssets(succ))
570 570 # return ss
571 571 #
572 572 # But we cannot use plain recursive calls here:
573 573 # - that would blow the python call stack
574 574 # - obsolescence markers may have cycles, we need to handle them.
575 575 #
576 576 # The `toproceed` list acts as our call stack. Every node we search
577 577 # successors sets for is stacked there.
578 578 #
579 579 # The `stackedset` is the set version of this stack, used to check if a node
580 580 # is already stacked. This check is used to detect cycles and prevent
581 581 # infinite loops.
582 582 #
583 583 # successors sets of all nodes are stored in the `cache` dictionary.
584 584 #
585 585 # After this while loop ends we use the cache to return the successors sets
586 586 # for the node requested by the caller.
587 587 while toproceed:
588 588 # Every iteration tries to compute the successors sets of the topmost
589 589 # node of the stack: CURRENT.
590 590 #
591 591 # There are four possible outcomes:
592 592 #
593 593 # 1) We already know the successors sets of CURRENT:
594 594 # -> mission accomplished, pop it from the stack.
595 595 # 2) Stop the walk:
596 596 # default case: Node is not obsolete
597 597 # closest case: Node is known at this repo filter level
598 598 # -> the node is its own successors sets. Add it to the cache.
599 599 # 3) We do not know successors set of direct successors of CURRENT:
600 600 # -> We add those successors to the stack.
601 601 # 4) We know successors sets of all direct successors of CURRENT:
602 602 # -> We can compute CURRENT successors set and add it to the
603 603 # cache.
604 604 #
605 605 current = toproceed[-1]
606 606
607 607 # case 2 condition is a bit hairy because of closest,
608 608 # we compute it on its own
609 609 case2condition = ((current not in succmarkers)
610 610 or (closest and current != initialnode
611 611 and current in repo))
612 612
613 613 if current in cache:
614 614 # case (1): We already know the successors sets
615 615 stackedset.remove(toproceed.pop())
616 616 elif case2condition:
617 617 # case (2): end of walk.
618 618 if current in repo:
619 619 # We have a valid successor.
620 620 cache[current] = [_succs((current,))]
621 621 else:
622 622 # Final obsolete version is unknown locally.
623 623 # Do not count that as a valid successor
624 624 cache[current] = []
625 625 else:
626 626 # cases (3) and (4)
627 627 #
628 628 # We proceed in two phases. Phase 1 aims to distinguish case (3)
629 629 # from case (4):
630 630 #
631 631 # For each direct successors of CURRENT, we check whether its
632 632 # successors sets are known. If they are not, we stack the
633 633 # unknown node and proceed to the next iteration of the while
634 634 # loop. (case 3)
635 635 #
636 636 # During this step, we may detect obsolescence cycles: a node
637 637 # with unknown successors sets but already in the call stack.
638 638 # In such a situation, we arbitrarily set the successors sets of
639 639 # the node to nothing (node pruned) to break the cycle.
640 640 #
641 641 # If no break was encountered we proceed to phase 2.
642 642 #
643 643 # Phase 2 computes successors sets of CURRENT (case 4); see details
644 644 # in phase 2 itself.
645 645 #
646 646 # Note the two levels of iteration in each phase.
647 647 # - The first one handles obsolescence markers using CURRENT as
648 648 # precursor (successors markers of CURRENT).
649 649 #
650 650 # Having multiple entries here means divergence.
651 651 #
652 652 # - The second one handles successors defined in each marker.
653 653 #
654 654 # Having none means pruned node, multiple successors means split,
655 655 # a single successor is a standard replacement.
656 656 #
657 657 for mark in sorted(succmarkers[current]):
658 658 for suc in mark[1]:
659 659 if suc not in cache:
660 660 if suc in stackedset:
661 661 # cycle breaking
662 662 cache[suc] = []
663 663 else:
664 664 # case (3) If we have not computed successors sets
665 665 # of one of those successors we add it to the
666 666 # `toproceed` stack and stop all work for this
667 667 # iteration.
668 668 toproceed.append(suc)
669 669 stackedset.add(suc)
670 670 break
671 671 else:
672 672 continue
673 673 break
674 674 else:
675 675 # case (4): we know all successors sets of all direct
676 676 # successors
677 677 #
678 678 # Successors set contributed by each marker depends on the
679 679 # successors sets of all its "successors" nodes.
680 680 #
681 681 # Each different marker is a divergence in the obsolescence
682 682 # history. It contributes successors sets distinct from other
683 683 # markers.
684 684 #
685 685 # Within a marker, a successor may have divergent successors
686 686 # sets. In such a case, the marker will contribute multiple
687 687 # divergent successors sets. If multiple successors have
688 688 # divergent successors sets, a Cartesian product is used.
689 689 #
690 690 # At the end we post-process successors sets to remove
691 691 # duplicated entries and successors sets that are strict subsets of
692 692 # another one.
693 693 succssets = []
694 694 for mark in sorted(succmarkers[current]):
695 695 # successors sets contributed by this marker
696 696 base = _succs()
697 697 base.markers.add(mark)
698 698 markss = [base]
699 699 for suc in mark[1]:
700 700 # cartesian product with previous successors
701 701 productresult = []
702 702 for prefix in markss:
703 703 for suffix in cache[suc]:
704 704 newss = prefix.copy()
705 705 newss.markers.update(suffix.markers)
706 706 for part in suffix:
707 707 # do not duplicate entries in the successors set;
708 708 # the first entry wins.
709 709 if part not in newss:
710 710 newss.append(part)
711 711 productresult.append(newss)
712 712 markss = productresult
713 713 succssets.extend(markss)
714 714 # remove duplicated and subset
715 715 seen = []
716 716 final = []
717 717 candidates = sorted((s for s in succssets if s),
718 718 key=len, reverse=True)
719 719 for cand in candidates:
720 720 for seensuccs in seen:
721 721 if cand.canmerge(seensuccs):
722 722 seensuccs.markers.update(cand.markers)
723 723 break
724 724 else:
725 725 final.append(cand)
726 726 seen.append(cand)
727 727 final.reverse() # put small successors set first
728 728 cache[current] = final
729 729 return cache[initialnode]
730 730
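The cartesian product in case (4) is the subtle step: each successors set of CURRENT picks one alternative per successor. A standalone sketch of that combination using itertools, on toy tuples rather than real markers:

    import itertools

    # one marker splits A into (B, C); B has two divergent successors
    # sets while C is final, so A ends up with two successors sets
    choices = {'B': [('B1',), ('B2',)], 'C': [('C',)]}
    ssets = [sum(combo, ()) for combo in
             itertools.product(choices['B'], choices['C'])]
    print(ssets)  # [('B1', 'C'), ('B2', 'C')]
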
731 731 def successorsandmarkers(repo, ctx):
732 732 """compute the raw data needed for computing obsfate
733 733 Returns a list of dicts, one dict per successors set
734 734 """
735 735 if not ctx.obsolete():
736 736 return None
737 737
738 738 ssets = successorssets(repo, ctx.node(), closest=True)
739 739
740 740 # closestsuccessors returns an empty list for pruned revisions, remap it
741 741 # into a list containing an empty list for future processing
742 742 if ssets == []:
743 743 ssets = [[]]
744 744
745 745 # Try to recover pruned markers
746 746 succsmap = repo.obsstore.successors
747 747 fullsuccessorsets = [] # successor set + markers
748 748 for sset in ssets:
749 749 if sset:
750 750 fullsuccessorsets.append(sset)
751 751 else:
752 752 # successorssets returns an empty list when ctx or one of its
753 753 # successors is pruned.
754 754 # In this case, walk the obs-markers tree again starting with ctx
755 755 # and find the relevant pruning obs-markers, the ones without
756 756 # successors.
757 757 # Having these markers allows us to compute some information about
758 758 # its fate, like who pruned this changeset and when.
759 759
760 760 # XXX we do not catch all prune markers (eg rewritten then pruned)
761 761 # (fix me later)
762 762 foundany = False
763 763 for mark in succsmap.get(ctx.node(), ()):
764 764 if not mark[1]:
765 765 foundany = True
766 766 sset = _succs()
767 767 sset.markers.add(mark)
768 768 fullsuccessorsets.append(sset)
769 769 if not foundany:
770 770 fullsuccessorsets.append(_succs())
771 771
772 772 values = []
773 773 for sset in fullsuccessorsets:
774 774 values.append({'successors': sset, 'markers': sset.markers})
775 775
776 776 return values
777 777
778 778 def _getobsfate(successorssets):
779 779 """ Compute a changeset obsolescence fate based on its successorssets.
780 780 Successors can be the tipmost ones or the immediate ones. This function's
781 781 return values are not meant to be shown directly to users; they are meant
782 782 to be used by internal functions only.
783 783 Returns one fate from the following values:
784 784 - pruned
785 785 - diverged
786 786 - superseded
787 787 - superseded_split
788 788 """
789 789
790 790 if len(successorssets) == 0:
791 791 # The commit has been pruned
792 792 return 'pruned'
793 793 elif len(successorssets) > 1:
794 794 return 'diverged'
795 795 else:
796 796 # No divergence, only one set of successors
797 797 successors = successorssets[0]
798 798
799 799 if len(successors) == 1:
800 800 return 'superseded'
801 801 else:
802 802 return 'superseded_split'
803 803
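The four fates map one-to-one onto the shape of the successors sets, as a quick sanity check shows:

    assert _getobsfate([]) == 'pruned'                  # no successors set
    assert _getobsfate([['A1'], ['A2']]) == 'diverged'  # several sets
    assert _getobsfate([['A1']]) == 'superseded'        # one single successor
    assert _getobsfate([['A1', 'A2']]) == 'superseded_split'
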
804 804 def obsfateverb(successorset, markers):
805 805 """ Return the verb summarizing the successorset and potentially using
806 806 information from the markers
807 807 """
808 808 if not successorset:
809 809 verb = 'pruned'
810 810 elif len(successorset) == 1:
811 811 verb = 'rewritten'
812 812 else:
813 813 verb = 'split'
814 814 return verb
815 815
816 816 def markersdates(markers):
817 817 """returns the list of dates for a list of markers
818 818 """
819 819 return [m[4] for m in markers]
820 820
821 821 def markersusers(markers):
822 822 """ Returns a sorted list of markers users without duplicates
823 823 """
824 824 markersmeta = [dict(m[3]) for m in markers]
825 825 users = set(meta.get('user') for meta in markersmeta if meta.get('user'))
826 826
827 827 return sorted(users)
828 828
829 829 def markersoperations(markers):
830 830 """ Returns a sorted list of markers operations without duplicates
831 831 """
832 832 markersmeta = [dict(m[3]) for m in markers]
833 833 operations = set(meta.get('operation') for meta in markersmeta
834 834 if meta.get('operation'))
835 835
836 836 return sorted(operations)
837 837
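These helpers only look at the metadata pairs in mark[3] and the date in mark[4], so a toy marker with the other positions elided is enough to exercise them:

    mark = (None, (), 0, (('user', 'alice'), ('operation', 'amend')),
            (1520000000.0, 0))
    print(markersusers([mark]))       # ['alice']
    print(markersoperations([mark]))  # ['amend']
    print(markersdates([mark]))       # [(1520000000.0, 0)]
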
838 838 def obsfateprinter(ui, repo, successors, markers, formatctx):
839 839 """ Build a obsfate string for a single successorset using all obsfate
840 840 related function defined in obsutil
841 841 """
842 842 quiet = ui.quiet
843 843 verbose = ui.verbose
844 844 normal = not verbose and not quiet
845 845
846 846 line = []
847 847
848 848 # Verb
849 849 line.append(obsfateverb(successors, markers))
850 850
851 851 # Operations
852 852 operations = markersoperations(markers)
853 853 if operations:
854 854 line.append(" using %s" % ", ".join(operations))
855 855
856 856 # Successors
857 857 if successors:
858 858 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
859 859 line.append(" as %s" % ", ".join(fmtsuccessors))
860 860
861 861 # Users
862 862 users = markersusers(markers)
863 863 # Filter out the current user in non-verbose mode to reduce the amount
864 864 # of information
865 865 if not verbose:
866 866 currentuser = ui.username(acceptempty=True)
867 867 if len(users) == 1 and currentuser in users:
868 868 users = None
869 869
870 870 if (verbose or normal) and users:
871 871 line.append(" by %s" % ", ".join(users))
872 872
873 873 # Date
874 874 dates = markersdates(markers)
875 875
876 876 if dates and verbose:
877 877 min_date = min(dates)
878 878 max_date = max(dates)
879 879
880 880 if min_date == max_date:
881 881 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
882 882 line.append(" (at %s)" % fmtmin_date)
883 883 else:
884 884 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
885 885 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
886 886 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
887 887
888 888 return "".join(line)
889 889
890 890
891 891 filteredmsgtable = {
892 892 "pruned": _("hidden revision '%s' is pruned"),
893 893 "diverged": _("hidden revision '%s' has diverged"),
894 894 "superseded": _("hidden revision '%s' was rewritten as: %s"),
895 895 "superseded_split": _("hidden revision '%s' was split as: %s"),
896 896 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
897 897 "%d more"),
898 898 }
899 899
900 900 def _getfilteredreason(repo, changeid, ctx):
901 901 """return a human-friendly string on why a obsolete changeset is hidden
902 902 """
903 903 successors = successorssets(repo, ctx.node())
904 904 fate = _getobsfate(successors)
905 905
906 906 # Be more precise in case the revision is superseded
907 907 if fate == 'pruned':
908 908 return filteredmsgtable['pruned'] % changeid
909 909 elif fate == 'diverged':
910 910 return filteredmsgtable['diverged'] % changeid
911 911 elif fate == 'superseded':
912 912 single_successor = nodemod.short(successors[0][0])
913 913 return filteredmsgtable['superseded'] % (changeid, single_successor)
914 914 elif fate == 'superseded_split':
915 915
916 916 succs = []
917 917 for node_id in successors[0]:
918 918 succs.append(nodemod.short(node_id))
919 919
920 920 if len(succs) <= 2:
921 921 fmtsuccs = ', '.join(succs)
922 922 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
923 923 else:
924 924 firstsuccessors = ', '.join(succs[:2])
925 925 remainingnumber = len(succs) - 2
926 926
927 927 args = (changeid, firstsuccessors, remainingnumber)
928 928 return filteredmsgtable['superseded_split_several'] % args
929 929
930 930 def divergentsets(repo, ctx):
931 931 """Compute sets of commits divergent with a given one"""
932 932 cache = {}
933 933 base = {}
934 934 for n in allpredecessors(repo.obsstore, [ctx.node()]):
935 935 if n == ctx.node():
936 936 # a node can't be a base for divergence with itself
937 937 continue
938 938 nsuccsets = successorssets(repo, n, cache=cache)
939 939 for nsuccset in nsuccsets:
940 940 if ctx.node() in nsuccset:
941 941 # we are only interested in *other* successor sets
942 942 continue
943 943 if tuple(nsuccset) in base:
944 944 # we already know the latest base for this divergence
945 945 continue
946 946 base[tuple(nsuccset)] = n
947 947 return [{'divergentnodes': divset, 'commonpredecessor': b}
948 948 for divset, b in base.iteritems()]
949 949
950 950 def whyunstable(repo, ctx):
951 951 result = []
952 952 if ctx.orphan():
953 953 for parent in ctx.parents():
954 954 kind = None
955 955 if parent.orphan():
956 956 kind = 'orphan'
957 957 elif parent.obsolete():
958 958 kind = 'obsolete'
959 959 if kind is not None:
960 960 result.append({'instability': 'orphan',
961 961 'reason': '%s parent' % kind,
962 962 'node': parent.hex()})
963 963 if ctx.phasedivergent():
964 964 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
965 965 ignoreflags=bumpedfix)
966 966 immutable = [repo[p] for p in predecessors
967 967 if p in repo and not repo[p].mutable()]
968 968 for predecessor in immutable:
969 969 result.append({'instability': 'phase-divergent',
970 970 'reason': 'immutable predecessor',
971 971 'node': predecessor.hex()})
972 972 if ctx.contentdivergent():
973 973 dsets = divergentsets(repo, ctx)
974 974 for dset in dsets:
975 975 divnodes = [repo[n] for n in dset['divergentnodes']]
976 976 result.append({'instability': 'content-divergent',
977 977 'divergentnodes': divnodes,
978 978 'reason': 'predecessor',
979 979 'node': nodemod.hex(dset['commonpredecessor'])})
980 980 return result
@@ -1,2866 +1,2866 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import contextlib
13 13 import copy
14 14 import email
15 15 import errno
16 16 import hashlib
17 17 import os
18 18 import posixpath
19 19 import re
20 20 import shutil
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 copies,
30 30 diffhelper,
31 diffutil,
31 32 encoding,
32 33 error,
33 34 mail,
34 35 mdiff,
35 36 pathutil,
36 37 pycompat,
37 38 scmutil,
38 39 similar,
39 40 util,
40 41 vfs as vfsmod,
41 42 )
42 43 from .utils import (
43 44 dateutil,
44 diffutil,
45 45 procutil,
46 46 stringutil,
47 47 )
48 48
49 49 stringio = util.stringio
50 50
51 51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
52 52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
53 53 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
54 54 b'[^ \ta-zA-Z0-9_\x80-\xff])')
55 55
56 56 PatchError = error.PatchError
57 57
58 58 # public functions
59 59
60 60 def split(stream):
61 61 '''return an iterator of individual patches from a stream'''
62 62 def isheader(line, inheader):
63 63 if inheader and line.startswith((' ', '\t')):
64 64 # continuation
65 65 return True
66 66 if line.startswith((' ', '-', '+')):
67 67 # diff line - don't check for header pattern in there
68 68 return False
69 69 l = line.split(': ', 1)
70 70 return len(l) == 2 and ' ' not in l[0]
71 71
72 72 def chunk(lines):
73 73 return stringio(''.join(lines))
74 74
75 75 def hgsplit(stream, cur):
76 76 inheader = True
77 77
78 78 for line in stream:
79 79 if not line.strip():
80 80 inheader = False
81 81 if not inheader and line.startswith('# HG changeset patch'):
82 82 yield chunk(cur)
83 83 cur = []
84 84 inheader = True
85 85
86 86 cur.append(line)
87 87
88 88 if cur:
89 89 yield chunk(cur)
90 90
91 91 def mboxsplit(stream, cur):
92 92 for line in stream:
93 93 if line.startswith('From '):
94 94 for c in split(chunk(cur[1:])):
95 95 yield c
96 96 cur = []
97 97
98 98 cur.append(line)
99 99
100 100 if cur:
101 101 for c in split(chunk(cur[1:])):
102 102 yield c
103 103
104 104 def mimesplit(stream, cur):
105 105 def msgfp(m):
106 106 fp = stringio()
107 107 g = email.Generator.Generator(fp, mangle_from_=False)
108 108 g.flatten(m)
109 109 fp.seek(0)
110 110 return fp
111 111
112 112 for line in stream:
113 113 cur.append(line)
114 114 c = chunk(cur)
115 115
116 116 m = mail.parse(c)
117 117 if not m.is_multipart():
118 118 yield msgfp(m)
119 119 else:
120 120 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
121 121 for part in m.walk():
122 122 ct = part.get_content_type()
123 123 if ct not in ok_types:
124 124 continue
125 125 yield msgfp(part)
126 126
127 127 def headersplit(stream, cur):
128 128 inheader = False
129 129
130 130 for line in stream:
131 131 if not inheader and isheader(line, inheader):
132 132 yield chunk(cur)
133 133 cur = []
134 134 inheader = True
135 135 if inheader and not isheader(line, inheader):
136 136 inheader = False
137 137
138 138 cur.append(line)
139 139
140 140 if cur:
141 141 yield chunk(cur)
142 142
143 143 def remainder(cur):
144 144 yield chunk(cur)
145 145
146 146 class fiter(object):
147 147 def __init__(self, fp):
148 148 self.fp = fp
149 149
150 150 def __iter__(self):
151 151 return self
152 152
153 153 def next(self):
154 154 l = self.fp.readline()
155 155 if not l:
156 156 raise StopIteration
157 157 return l
158 158
159 159 __next__ = next
160 160
161 161 inheader = False
162 162 cur = []
163 163
164 164 mimeheaders = ['content-type']
165 165
166 166 if not util.safehasattr(stream, 'next'):
167 167 # http responses, for example, have readline but not next
168 168 stream = fiter(stream)
169 169
170 170 for line in stream:
171 171 cur.append(line)
172 172 if line.startswith('# HG changeset patch'):
173 173 return hgsplit(stream, cur)
174 174 elif line.startswith('From '):
175 175 return mboxsplit(stream, cur)
176 176 elif isheader(line, inheader):
177 177 inheader = True
178 178 if line.split(':', 1)[0].lower() in mimeheaders:
179 179 # let email parser handle this
180 180 return mimesplit(stream, cur)
181 181 elif line.startswith('--- ') and inheader:
182 182 # No email headers seen before the diff started, split by hand
183 183 return headersplit(stream, cur)
184 184 # Not enough info, keep reading
185 185
186 186 # if we are here, we have a very plain patch
187 187 return remainder(cur)
188 188
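A rough usage sketch: two export-style patches separated by a blank line come back as two chunks (the blank line matters, since hgsplit only starts a new patch once it has left the header):

    data = ('# HG changeset patch\n# User alice\npatch one\n\n'
            '# HG changeset patch\n# User bob\npatch two\n')
    chunks = list(split(stringio(data)))
    assert len(chunks) == 2
    assert chunks[1].getvalue().startswith('# HG changeset patch')
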
189 189 ## Some facility for extensible patch parsing:
190 190 # list of pairs ("header to match", "data key")
191 191 patchheadermap = [('Date', 'date'),
192 192 ('Branch', 'branch'),
193 193 ('Node ID', 'nodeid'),
194 194 ]
195 195
196 196 @contextlib.contextmanager
197 197 def extract(ui, fileobj):
198 198 '''extract patch from data read from fileobj.
199 199
200 200 patch can be a normal patch or contained in an email message.
201 201
202 202 return a dictionary. Standard keys are:
203 203 - filename,
204 204 - message,
205 205 - user,
206 206 - date,
207 207 - branch,
208 208 - node,
209 209 - p1,
210 210 - p2.
211 211 Any item can be missing from the dictionary. If filename is missing,
212 212 fileobj did not contain a patch. The temporary file is unlinked when the
context manager exits.'''
213 213
214 214 fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
215 215 tmpfp = os.fdopen(fd, r'wb')
216 216 try:
217 217 yield _extract(ui, fileobj, tmpname, tmpfp)
218 218 finally:
219 219 tmpfp.close()
220 220 os.unlink(tmpname)
221 221
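A sketch of typical use, assuming an existing `ui` object, an open file object `fp`, and a hypothetical `process` helper; the temporary patch file only exists inside the with block:

    with extract(ui, fp) as data:
        if 'filename' in data:
            # the file still exists here; it is unlinked on exit
            process(data['filename'], data.get('message', ''))
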
222 222 def _extract(ui, fileobj, tmpname, tmpfp):
223 223
224 224 # attempt to detect the start of a patch
225 225 # (this heuristic is borrowed from quilt)
226 226 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
227 227 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
228 228 br'---[ \t].*?^\+\+\+[ \t]|'
229 229 br'\*\*\*[ \t].*?^---[ \t])',
230 230 re.MULTILINE | re.DOTALL)
231 231
232 232 data = {}
233 233
234 234 msg = mail.parse(fileobj)
235 235
236 236 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
237 237 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
238 238 if not subject and not data['user']:
239 239 # Not an email, restore parsed headers if any
240 240 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
241 241 for h in msg.items()) + '\n'
242 242
243 243 # should try to parse msg['Date']
244 244 parents = []
245 245
246 246 if subject:
247 247 if subject.startswith('[PATCH'):
248 248 pend = subject.find(']')
249 249 if pend >= 0:
250 250 subject = subject[pend + 1:].lstrip()
251 251 subject = re.sub(br'\n[ \t]+', ' ', subject)
252 252 ui.debug('Subject: %s\n' % subject)
253 253 if data['user']:
254 254 ui.debug('From: %s\n' % data['user'])
255 255 diffs_seen = 0
256 256 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
257 257 message = ''
258 258 for part in msg.walk():
259 259 content_type = pycompat.bytestr(part.get_content_type())
260 260 ui.debug('Content-Type: %s\n' % content_type)
261 261 if content_type not in ok_types:
262 262 continue
263 263 payload = part.get_payload(decode=True)
264 264 m = diffre.search(payload)
265 265 if m:
266 266 hgpatch = False
267 267 hgpatchheader = False
268 268 ignoretext = False
269 269
270 270 ui.debug('found patch at byte %d\n' % m.start(0))
271 271 diffs_seen += 1
272 272 cfp = stringio()
273 273 for line in payload[:m.start(0)].splitlines():
274 274 if line.startswith('# HG changeset patch') and not hgpatch:
275 275 ui.debug('patch generated by hg export\n')
276 276 hgpatch = True
277 277 hgpatchheader = True
278 278 # drop earlier commit message content
279 279 cfp.seek(0)
280 280 cfp.truncate()
281 281 subject = None
282 282 elif hgpatchheader:
283 283 if line.startswith('# User '):
284 284 data['user'] = line[7:]
285 285 ui.debug('From: %s\n' % data['user'])
286 286 elif line.startswith("# Parent "):
287 287 parents.append(line[9:].lstrip())
288 288 elif line.startswith("# "):
289 289 for header, key in patchheadermap:
290 290 prefix = '# %s ' % header
291 291 if line.startswith(prefix):
292 292 data[key] = line[len(prefix):]
293 293 else:
294 294 hgpatchheader = False
295 295 elif line == '---':
296 296 ignoretext = True
297 297 if not hgpatchheader and not ignoretext:
298 298 cfp.write(line)
299 299 cfp.write('\n')
300 300 message = cfp.getvalue()
301 301 if tmpfp:
302 302 tmpfp.write(payload)
303 303 if not payload.endswith('\n'):
304 304 tmpfp.write('\n')
305 305 elif not diffs_seen and message and content_type == 'text/plain':
306 306 message += '\n' + payload
307 307
308 308 if subject and not message.startswith(subject):
309 309 message = '%s\n%s' % (subject, message)
310 310 data['message'] = message
311 311 tmpfp.close()
312 312 if parents:
313 313 data['p1'] = parents.pop(0)
314 314 if parents:
315 315 data['p2'] = parents.pop(0)
316 316
317 317 if diffs_seen:
318 318 data['filename'] = tmpname
319 319
320 320 return data
321 321
322 322 class patchmeta(object):
323 323 """Patched file metadata
324 324
325 325 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
326 326 or COPY. 'path' is patched file path. 'oldpath' is set to the
327 327 origin file when 'op' is either COPY or RENAME, None otherwise. If
328 328 file mode is changed, 'mode' is a tuple (islink, isexec) where
329 329 'islink' is True if the file is a symlink and 'isexec' is True if
330 330 the file is executable. Otherwise, 'mode' is None.
331 331 """
332 332 def __init__(self, path):
333 333 self.path = path
334 334 self.oldpath = None
335 335 self.mode = None
336 336 self.op = 'MODIFY'
337 337 self.binary = False
338 338
339 339 def setmode(self, mode):
340 340 islink = mode & 0o20000
341 341 isexec = mode & 0o100
342 342 self.mode = (islink, isexec)
343 343
344 344 def copy(self):
345 345 other = patchmeta(self.path)
346 346 other.oldpath = self.oldpath
347 347 other.mode = self.mode
348 348 other.op = self.op
349 349 other.binary = self.binary
350 350 return other
351 351
352 352 def _ispatchinga(self, afile):
353 353 if afile == '/dev/null':
354 354 return self.op == 'ADD'
355 355 return afile == 'a/' + (self.oldpath or self.path)
356 356
357 357 def _ispatchingb(self, bfile):
358 358 if bfile == '/dev/null':
359 359 return self.op == 'DELETE'
360 360 return bfile == 'b/' + self.path
361 361
362 362 def ispatching(self, afile, bfile):
363 363 return self._ispatchinga(afile) and self._ispatchingb(bfile)
364 364
365 365 def __repr__(self):
366 366 return "<patchmeta %s %r>" % (self.op, self.path)
367 367
368 368 def readgitpatch(lr):
369 369 """extract git-style metadata about patches from <patchname>"""
370 370
371 371 # Filter patch for git information
372 372 gp = None
373 373 gitpatches = []
374 374 for line in lr:
375 375 line = line.rstrip(' \r\n')
376 376 if line.startswith('diff --git a/'):
377 377 m = gitre.match(line)
378 378 if m:
379 379 if gp:
380 380 gitpatches.append(gp)
381 381 dst = m.group(2)
382 382 gp = patchmeta(dst)
383 383 elif gp:
384 384 if line.startswith('--- '):
385 385 gitpatches.append(gp)
386 386 gp = None
387 387 continue
388 388 if line.startswith('rename from '):
389 389 gp.op = 'RENAME'
390 390 gp.oldpath = line[12:]
391 391 elif line.startswith('rename to '):
392 392 gp.path = line[10:]
393 393 elif line.startswith('copy from '):
394 394 gp.op = 'COPY'
395 395 gp.oldpath = line[10:]
396 396 elif line.startswith('copy to '):
397 397 gp.path = line[8:]
398 398 elif line.startswith('deleted file'):
399 399 gp.op = 'DELETE'
400 400 elif line.startswith('new file mode '):
401 401 gp.op = 'ADD'
402 402 gp.setmode(int(line[-6:], 8))
403 403 elif line.startswith('new mode '):
404 404 gp.setmode(int(line[-6:], 8))
405 405 elif line.startswith('GIT binary patch'):
406 406 gp.binary = True
407 407 if gp:
408 408 gitpatches.append(gp)
409 409
410 410 return gitpatches
411 411
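For example, feeding a minimal rename diff through readgitpatch yields a single patchmeta with the operation and both paths filled in (a small illustration):

    lines = iter(['diff --git a/old.txt b/new.txt',
                  'rename from old.txt',
                  'rename to new.txt'])
    gp = readgitpatch(lines)[0]
    print(gp.op, gp.oldpath, gp.path)  # RENAME old.txt new.txt
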
412 412 class linereader(object):
413 413 # simple class to allow pushing lines back into the input stream
414 414 def __init__(self, fp):
415 415 self.fp = fp
416 416 self.buf = []
417 417
418 418 def push(self, line):
419 419 if line is not None:
420 420 self.buf.append(line)
421 421
422 422 def readline(self):
423 423 if self.buf:
424 424 l = self.buf[0]
425 425 del self.buf[0]
426 426 return l
427 427 return self.fp.readline()
428 428
429 429 def __iter__(self):
430 430 return iter(self.readline, '')
431 431
432 432 class abstractbackend(object):
433 433 def __init__(self, ui):
434 434 self.ui = ui
435 435
436 436 def getfile(self, fname):
437 437 """Return target file data and flags as a (data, (islink,
438 438 isexec)) tuple. Data is None if file is missing/deleted.
439 439 """
440 440 raise NotImplementedError
441 441
442 442 def setfile(self, fname, data, mode, copysource):
443 443 """Write data to target file fname and set its mode. mode is a
444 444 (islink, isexec) tuple. If data is None, the file content should
445 445 be left unchanged. If the file is modified after being copied,
446 446 copysource is set to the original file name.
447 447 """
448 448 raise NotImplementedError
449 449
450 450 def unlink(self, fname):
451 451 """Unlink target file."""
452 452 raise NotImplementedError
453 453
454 454 def writerej(self, fname, failed, total, lines):
455 455 """Write rejected lines for fname. total is the number of hunks
456 456 which failed to apply and total the total number of hunks for this
457 457 files.
458 458 """
459 459
460 460 def exists(self, fname):
461 461 raise NotImplementedError
462 462
463 463 def close(self):
464 464 raise NotImplementedError
465 465
466 466 class fsbackend(abstractbackend):
467 467 def __init__(self, ui, basedir):
468 468 super(fsbackend, self).__init__(ui)
469 469 self.opener = vfsmod.vfs(basedir)
470 470
471 471 def getfile(self, fname):
472 472 if self.opener.islink(fname):
473 473 return (self.opener.readlink(fname), (True, False))
474 474
475 475 isexec = False
476 476 try:
477 477 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
478 478 except OSError as e:
479 479 if e.errno != errno.ENOENT:
480 480 raise
481 481 try:
482 482 return (self.opener.read(fname), (False, isexec))
483 483 except IOError as e:
484 484 if e.errno != errno.ENOENT:
485 485 raise
486 486 return None, None
487 487
488 488 def setfile(self, fname, data, mode, copysource):
489 489 islink, isexec = mode
490 490 if data is None:
491 491 self.opener.setflags(fname, islink, isexec)
492 492 return
493 493 if islink:
494 494 self.opener.symlink(data, fname)
495 495 else:
496 496 self.opener.write(fname, data)
497 497 if isexec:
498 498 self.opener.setflags(fname, False, True)
499 499
500 500 def unlink(self, fname):
501 501 rmdir = self.ui.configbool('experimental', 'removeemptydirs')
502 502 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
503 503
504 504 def writerej(self, fname, failed, total, lines):
505 505 fname = fname + ".rej"
506 506 self.ui.warn(
507 507 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
508 508 (failed, total, fname))
509 509 fp = self.opener(fname, 'w')
510 510 fp.writelines(lines)
511 511 fp.close()
512 512
513 513 def exists(self, fname):
514 514 return self.opener.lexists(fname)
515 515
516 516 class workingbackend(fsbackend):
517 517 def __init__(self, ui, repo, similarity):
518 518 super(workingbackend, self).__init__(ui, repo.root)
519 519 self.repo = repo
520 520 self.similarity = similarity
521 521 self.removed = set()
522 522 self.changed = set()
523 523 self.copied = []
524 524
525 525 def _checkknown(self, fname):
526 526 if self.repo.dirstate[fname] == '?' and self.exists(fname):
527 527 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
528 528
529 529 def setfile(self, fname, data, mode, copysource):
530 530 self._checkknown(fname)
531 531 super(workingbackend, self).setfile(fname, data, mode, copysource)
532 532 if copysource is not None:
533 533 self.copied.append((copysource, fname))
534 534 self.changed.add(fname)
535 535
536 536 def unlink(self, fname):
537 537 self._checkknown(fname)
538 538 super(workingbackend, self).unlink(fname)
539 539 self.removed.add(fname)
540 540 self.changed.add(fname)
541 541
542 542 def close(self):
543 543 wctx = self.repo[None]
544 544 changed = set(self.changed)
545 545 for src, dst in self.copied:
546 546 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
547 547 if self.removed:
548 548 wctx.forget(sorted(self.removed))
549 549 for f in self.removed:
550 550 if f not in self.repo.dirstate:
551 551 # File was deleted and no longer belongs to the
552 552 # dirstate, it was probably marked added then
553 553 # deleted, and should not be considered by
554 554 # marktouched().
555 555 changed.discard(f)
556 556 if changed:
557 557 scmutil.marktouched(self.repo, changed, self.similarity)
558 558 return sorted(self.changed)
559 559
560 560 class filestore(object):
561 561 def __init__(self, maxsize=None):
562 562 self.opener = None
563 563 self.files = {}
564 564 self.created = 0
565 565 self.maxsize = maxsize
566 566 if self.maxsize is None:
567 567 self.maxsize = 4*(2**20)
568 568 self.size = 0
569 569 self.data = {}
570 570
571 571 def setfile(self, fname, data, mode, copied=None):
572 572 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
573 573 self.data[fname] = (data, mode, copied)
574 574 self.size += len(data)
575 575 else:
576 576 if self.opener is None:
577 577 root = pycompat.mkdtemp(prefix='hg-patch-')
578 578 self.opener = vfsmod.vfs(root)
579 579 # Avoid filename issues with these simple names
580 580 fn = '%d' % self.created
581 581 self.opener.write(fn, data)
582 582 self.created += 1
583 583 self.files[fname] = (fn, mode, copied)
584 584
585 585 def getfile(self, fname):
586 586 if fname in self.data:
587 587 return self.data[fname]
588 588 if not self.opener or fname not in self.files:
589 589 return None, None, None
590 590 fn, mode, copied = self.files[fname]
591 591 return self.opener.read(fn), mode, copied
592 592
593 593 def close(self):
594 594 if self.opener:
595 595 shutil.rmtree(self.opener.base)
596 596
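A sketch of the spillover behaviour: with maxsize=0 every non-empty file goes to the temporary on-disk store, but getfile() hides the difference:

    store = filestore(maxsize=0)
    store.setfile('a.txt', 'data', (False, False))
    print(store.getfile('a.txt'))  # ('data', (False, False), None)
    store.close()                  # removes the temporary directory
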
597 597 class repobackend(abstractbackend):
598 598 def __init__(self, ui, repo, ctx, store):
599 599 super(repobackend, self).__init__(ui)
600 600 self.repo = repo
601 601 self.ctx = ctx
602 602 self.store = store
603 603 self.changed = set()
604 604 self.removed = set()
605 605 self.copied = {}
606 606
607 607 def _checkknown(self, fname):
608 608 if fname not in self.ctx:
609 609 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
610 610
611 611 def getfile(self, fname):
612 612 try:
613 613 fctx = self.ctx[fname]
614 614 except error.LookupError:
615 615 return None, None
616 616 flags = fctx.flags()
617 617 return fctx.data(), ('l' in flags, 'x' in flags)
618 618
619 619 def setfile(self, fname, data, mode, copysource):
620 620 if copysource:
621 621 self._checkknown(copysource)
622 622 if data is None:
623 623 data = self.ctx[fname].data()
624 624 self.store.setfile(fname, data, mode, copysource)
625 625 self.changed.add(fname)
626 626 if copysource:
627 627 self.copied[fname] = copysource
628 628
629 629 def unlink(self, fname):
630 630 self._checkknown(fname)
631 631 self.removed.add(fname)
632 632
633 633 def exists(self, fname):
634 634 return fname in self.ctx
635 635
636 636 def close(self):
637 637 return self.changed | self.removed
638 638
639 639 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
640 640 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
641 641 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
642 642 eolmodes = ['strict', 'crlf', 'lf', 'auto']
643 643
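The unified hunk header regex in action: the length groups are optional and come back as None when the header uses the short form, which the comment above defines as a length of 1:

    m = unidesc.match('@@ -1,3 +1,4 @@')
    print(m.groups())  # ('1', '3', '1', '4')
    m = unidesc.match('@@ -5 +5 @@')
    print(m.groups())  # ('5', None, '5', None)
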
644 644 class patchfile(object):
645 645 def __init__(self, ui, gp, backend, store, eolmode='strict'):
646 646 self.fname = gp.path
647 647 self.eolmode = eolmode
648 648 self.eol = None
649 649 self.backend = backend
650 650 self.ui = ui
651 651 self.lines = []
652 652 self.exists = False
653 653 self.missing = True
654 654 self.mode = gp.mode
655 655 self.copysource = gp.oldpath
656 656 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
657 657 self.remove = gp.op == 'DELETE'
658 658 if self.copysource is None:
659 659 data, mode = backend.getfile(self.fname)
660 660 else:
661 661 data, mode = store.getfile(self.copysource)[:2]
662 662 if data is not None:
663 663 self.exists = self.copysource is None or backend.exists(self.fname)
664 664 self.missing = False
665 665 if data:
666 666 self.lines = mdiff.splitnewlines(data)
667 667 if self.mode is None:
668 668 self.mode = mode
669 669 if self.lines:
670 670 # Normalize line endings
671 671 if self.lines[0].endswith('\r\n'):
672 672 self.eol = '\r\n'
673 673 elif self.lines[0].endswith('\n'):
674 674 self.eol = '\n'
675 675 if eolmode != 'strict':
676 676 nlines = []
677 677 for l in self.lines:
678 678 if l.endswith('\r\n'):
679 679 l = l[:-2] + '\n'
680 680 nlines.append(l)
681 681 self.lines = nlines
682 682 else:
683 683 if self.create:
684 684 self.missing = False
685 685 if self.mode is None:
686 686 self.mode = (False, False)
687 687 if self.missing:
688 688 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
689 689 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
690 690 "current directory)\n"))
691 691
692 692 self.hash = {}
693 693 self.dirty = 0
694 694 self.offset = 0
695 695 self.skew = 0
696 696 self.rej = []
697 697 self.fileprinted = False
698 698 self.printfile(False)
699 699 self.hunks = 0
700 700
701 701 def writelines(self, fname, lines, mode):
702 702 if self.eolmode == 'auto':
703 703 eol = self.eol
704 704 elif self.eolmode == 'crlf':
705 705 eol = '\r\n'
706 706 else:
707 707 eol = '\n'
708 708
709 709 if self.eolmode != 'strict' and eol and eol != '\n':
710 710 rawlines = []
711 711 for l in lines:
712 712 if l and l.endswith('\n'):
713 713 l = l[:-1] + eol
714 714 rawlines.append(l)
715 715 lines = rawlines
716 716
717 717 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
718 718
719 719 def printfile(self, warn):
720 720 if self.fileprinted:
721 721 return
722 722 if warn or self.ui.verbose:
723 723 self.fileprinted = True
724 724 s = _("patching file %s\n") % self.fname
725 725 if warn:
726 726 self.ui.warn(s)
727 727 else:
728 728 self.ui.note(s)
729 729
730 730
731 731 def findlines(self, l, linenum):
732 732 # looks through the hash and finds candidate lines. The
733 733 # result is a list of line numbers sorted based on distance
734 734 # from linenum
735 735
736 736 cand = self.hash.get(l, [])
737 737 if len(cand) > 1:
738 738 # resort our list of potentials forward then back.
739 739 cand.sort(key=lambda x: abs(x - linenum))
740 740 return cand
741 741
742 742 def write_rej(self):
743 743 # our rejects are a little different from patch(1). This always
744 744 # creates rejects in the same form as the original patch. A file
745 745 # header is inserted so that you can run the reject through patch again
746 746 # without having to type the filename.
747 747 if not self.rej:
748 748 return
749 749 base = os.path.basename(self.fname)
750 750 lines = ["--- %s\n+++ %s\n" % (base, base)]
751 751 for x in self.rej:
752 752 for l in x.hunk:
753 753 lines.append(l)
754 754 if l[-1:] != '\n':
755 755 lines.append("\n\ No newline at end of file\n")
756 756 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
757 757
758 758 def apply(self, h):
759 759 if not h.complete():
760 760 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
761 761 (h.number, h.desc, len(h.a), h.lena, len(h.b),
762 762 h.lenb))
763 763
764 764 self.hunks += 1
765 765
766 766 if self.missing:
767 767 self.rej.append(h)
768 768 return -1
769 769
770 770 if self.exists and self.create:
771 771 if self.copysource:
772 772 self.ui.warn(_("cannot create %s: destination already "
773 773 "exists\n") % self.fname)
774 774 else:
775 775 self.ui.warn(_("file %s already exists\n") % self.fname)
776 776 self.rej.append(h)
777 777 return -1
778 778
779 779 if isinstance(h, binhunk):
780 780 if self.remove:
781 781 self.backend.unlink(self.fname)
782 782 else:
783 783 l = h.new(self.lines)
784 784 self.lines[:] = l
785 785 self.offset += len(l)
786 786 self.dirty = True
787 787 return 0
788 788
789 789 horig = h
790 790 if (self.eolmode in ('crlf', 'lf')
791 791 or self.eolmode == 'auto' and self.eol):
792 792 # If new eols are going to be normalized, then normalize
793 793 # hunk data before patching. Otherwise, preserve input
794 794 # line-endings.
795 795 h = h.getnormalized()
796 796
797 797 # fast case first, no offsets, no fuzz
798 798 old, oldstart, new, newstart = h.fuzzit(0, False)
799 799 oldstart += self.offset
800 800 orig_start = oldstart
801 801 # if there's skew we want to emit the "(offset %d lines)" even
802 802 # when the hunk cleanly applies at start + skew, so skip the
803 803 # fast case code
804 804 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
805 805 if self.remove:
806 806 self.backend.unlink(self.fname)
807 807 else:
808 808 self.lines[oldstart:oldstart + len(old)] = new
809 809 self.offset += len(new) - len(old)
810 810 self.dirty = True
811 811 return 0
812 812
813 813 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
814 814 self.hash = {}
815 815 for x, s in enumerate(self.lines):
816 816 self.hash.setdefault(s, []).append(x)
817 817
818 818 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
819 819 for toponly in [True, False]:
820 820 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
821 821 oldstart = oldstart + self.offset + self.skew
822 822 oldstart = min(oldstart, len(self.lines))
823 823 if old:
824 824 cand = self.findlines(old[0][1:], oldstart)
825 825 else:
826 826 # Only adding lines with no or fuzzed context, just
827 827 # take the skew into account
828 828 cand = [oldstart]
829 829
830 830 for l in cand:
831 831 if not old or diffhelper.testhunk(old, self.lines, l):
832 832 self.lines[l : l + len(old)] = new
833 833 self.offset += len(new) - len(old)
834 834 self.skew = l - orig_start
835 835 self.dirty = True
836 836 offset = l - orig_start - fuzzlen
837 837 if fuzzlen:
838 838 msg = _("Hunk #%d succeeded at %d "
839 839 "with fuzz %d "
840 840 "(offset %d lines).\n")
841 841 self.printfile(True)
842 842 self.ui.warn(msg %
843 843 (h.number, l + 1, fuzzlen, offset))
844 844 else:
845 845 msg = _("Hunk #%d succeeded at %d "
846 846 "(offset %d lines).\n")
847 847 self.ui.note(msg % (h.number, l + 1, offset))
848 848 return fuzzlen
849 849 self.printfile(True)
850 850 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
851 851 self.rej.append(horig)
852 852 return -1
853 853
854 854 def close(self):
855 855 if self.dirty:
856 856 self.writelines(self.fname, self.lines, self.mode)
857 857 self.write_rej()
858 858 return len(self.rej)
859 859
860 860 class header(object):
861 861 """patch header
862 862 """
863 863 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
864 864 diff_re = re.compile('diff -r .* (.*)$')
865 865 allhunks_re = re.compile('(?:index|deleted file) ')
866 866 pretty_re = re.compile('(?:new file|deleted file) ')
867 867 special_re = re.compile('(?:index|deleted|copy|rename) ')
868 868 newfile_re = re.compile('(?:new file)')
869 869
870 870 def __init__(self, header):
871 871 self.header = header
872 872 self.hunks = []
873 873
874 874 def binary(self):
875 875 return any(h.startswith('index ') for h in self.header)
876 876
877 877 def pretty(self, fp):
878 878 for h in self.header:
879 879 if h.startswith('index '):
880 880 fp.write(_('this modifies a binary file (all or nothing)\n'))
881 881 break
882 882 if self.pretty_re.match(h):
883 883 fp.write(h)
884 884 if self.binary():
885 885 fp.write(_('this is a binary file\n'))
886 886 break
887 887 if h.startswith('---'):
888 888 fp.write(_('%d hunks, %d lines changed\n') %
889 889 (len(self.hunks),
890 890 sum([max(h.added, h.removed) for h in self.hunks])))
891 891 break
892 892 fp.write(h)
893 893
894 894 def write(self, fp):
895 895 fp.write(''.join(self.header))
896 896
897 897 def allhunks(self):
898 898 return any(self.allhunks_re.match(h) for h in self.header)
899 899
900 900 def files(self):
901 901 match = self.diffgit_re.match(self.header[0])
902 902 if match:
903 903 fromfile, tofile = match.groups()
904 904 if fromfile == tofile:
905 905 return [fromfile]
906 906 return [fromfile, tofile]
907 907 else:
908 908 return self.diff_re.match(self.header[0]).groups()
909 909
910 910 def filename(self):
911 911 return self.files()[-1]
912 912
913 913 def __repr__(self):
914 914 return '<header %s>' % (' '.join(map(repr, self.files())))
915 915
916 916 def isnewfile(self):
917 917 return any(self.newfile_re.match(h) for h in self.header)
918 918
919 919 def special(self):
920 920 # Special files are shown only at the header level and not at the hunk
921 921 # level; for example, a file that has been deleted is a special file.
922 922 # The user cannot change the content of the operation: in the case of
923 923 # a deleted file, they have to take the deletion as a whole or not at
924 924 # all; they cannot take only part of it.
925 925 # Newly added files are special if they are empty; they are not special
926 926 # if they have some content, as we want to be able to change it
927 927 nocontent = len(self.header) == 2
928 928 emptynewfile = self.isnewfile() and nocontent
929 929 return emptynewfile or \
930 930 any(self.special_re.match(h) for h in self.header)
931 931
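# A small usage sketch (hypothetical demo name, not part of this
# module) for header.files() above: the file names come from the first
# header line, and identical a/ and b/ paths collapse into one entry.
def _demo_headerfiles():
    assert header(['diff --git a/foo.c b/foo.c\n']).files() == ['foo.c']
    assert (header(['diff --git a/old.c b/new.c\n']).files()
            == ['old.c', 'new.c'])
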
932 932 class recordhunk(object):
933 933 """patch hunk
934 934
935 935 XXX shouldn't we merge this with the other hunk class?
936 936 """
937 937
938 938 def __init__(self, header, fromline, toline, proc, before, hunk, after,
939 939 maxcontext=None):
940 940 def trimcontext(lines, reverse=False):
941 941 if maxcontext is not None:
942 942 delta = len(lines) - maxcontext
943 943 if delta > 0:
944 944 if reverse:
945 945 return delta, lines[delta:]
946 946 else:
947 947 return delta, lines[:maxcontext]
948 948 return 0, lines
949 949
950 950 self.header = header
951 951 trimmedbefore, self.before = trimcontext(before, True)
952 952 self.fromline = fromline + trimmedbefore
953 953 self.toline = toline + trimmedbefore
954 954 _trimmedafter, self.after = trimcontext(after, False)
955 955 self.proc = proc
956 956 self.hunk = hunk
957 957 self.added, self.removed = self.countchanges(self.hunk)
958 958
959 959 def __eq__(self, v):
960 960 if not isinstance(v, recordhunk):
961 961 return False
962 962
963 963 return ((v.hunk == self.hunk) and
964 964 (v.proc == self.proc) and
965 965 (self.fromline == v.fromline) and
966 966 (self.header.files() == v.header.files()))
967 967
968 968 def __hash__(self):
969 969 return hash((tuple(self.hunk),
970 970 tuple(self.header.files()),
971 971 self.fromline,
972 972 self.proc))
973 973
974 974 def countchanges(self, hunk):
975 975 """hunk -> (n+,n-)"""
976 976 add = len([h for h in hunk if h.startswith('+')])
977 977 rem = len([h for h in hunk if h.startswith('-')])
978 978 return add, rem
979 979
980 980 def reversehunk(self):
981 981 """return another recordhunk which is the reverse of the hunk
982 982
983 983 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
984 984 that, swap fromline/toline and +/- signs while keeping other things
985 985 unchanged.
986 986 """
987 987 m = {'+': '-', '-': '+', '\\': '\\'}
988 988 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
989 989 return recordhunk(self.header, self.toline, self.fromline, self.proc,
990 990 self.before, hunk, self.after)
991 991
992 992 def write(self, fp):
993 993 delta = len(self.before) + len(self.after)
994 994 if self.after and self.after[-1] == '\\ No newline at end of file\n':
995 995 delta -= 1
996 996 fromlen = delta + self.removed
997 997 tolen = delta + self.added
998 998 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
999 999 (self.fromline, fromlen, self.toline, tolen,
1000 1000 self.proc and (' ' + self.proc)))
1001 1001 fp.write(''.join(self.before + self.hunk + self.after))
1002 1002
1003 1003 pretty = write
1004 1004
1005 1005 def filename(self):
1006 1006 return self.header.filename()
1007 1007
1008 1008 def __repr__(self):
1009 1009 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1010 1010
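# Illustrative sketch (hypothetical demo name) of the sign map that
# reversehunk() uses above: '+' and '-' swap while the '\' no-eol
# marker is preserved, turning a diff(A, B) hunk into a diff(B, A) one.
def _demo_reverselines(hunklines):
    m = {'+': '-', '-': '+', '\\': '\\'}
    return ['%s%s' % (m[l[0:1]], l[1:]) for l in hunklines]

# e.g. _demo_reverselines(['+new\n', '-old\n']) == ['-new\n', '+old\n']
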
1011 1011 def getmessages():
1012 1012 return {
1013 1013 'multiple': {
1014 1014 'apply': _("apply change %d/%d to '%s'?"),
1015 1015 'discard': _("discard change %d/%d to '%s'?"),
1016 1016 'record': _("record change %d/%d to '%s'?"),
1017 1017 },
1018 1018 'single': {
1019 1019 'apply': _("apply this change to '%s'?"),
1020 1020 'discard': _("discard this change to '%s'?"),
1021 1021 'record': _("record this change to '%s'?"),
1022 1022 },
1023 1023 'help': {
1024 1024 'apply': _('[Ynesfdaq?]'
1025 1025 '$$ &Yes, apply this change'
1026 1026 '$$ &No, skip this change'
1027 1027 '$$ &Edit this change manually'
1028 1028 '$$ &Skip remaining changes to this file'
1029 1029 '$$ Apply remaining changes to this &file'
1030 1030 '$$ &Done, skip remaining changes and files'
1031 1031 '$$ Apply &all changes to all remaining files'
1032 1032 '$$ &Quit, applying no changes'
1033 1033 '$$ &? (display help)'),
1034 1034 'discard': _('[Ynesfdaq?]'
1035 1035 '$$ &Yes, discard this change'
1036 1036 '$$ &No, skip this change'
1037 1037 '$$ &Edit this change manually'
1038 1038 '$$ &Skip remaining changes to this file'
1039 1039 '$$ Discard remaining changes to this &file'
1040 1040 '$$ &Done, skip remaining changes and files'
1041 1041 '$$ Discard &all changes to all remaining files'
1042 1042 '$$ &Quit, discarding no changes'
1043 1043 '$$ &? (display help)'),
1044 1044 'record': _('[Ynesfdaq?]'
1045 1045 '$$ &Yes, record this change'
1046 1046 '$$ &No, skip this change'
1047 1047 '$$ &Edit this change manually'
1048 1048 '$$ &Skip remaining changes to this file'
1049 1049 '$$ Record remaining changes to this &file'
1050 1050 '$$ &Done, skip remaining changes and files'
1051 1051 '$$ Record &all changes to all remaining files'
1052 1052 '$$ &Quit, recording no changes'
1053 1053 '$$ &? (display help)'),
1054 1054 }
1055 1055 }
1056 1056
1057 1057 def filterpatch(ui, headers, operation=None):
1058 1058 """Interactively filter patch chunks into applied-only chunks"""
1059 1059 messages = getmessages()
1060 1060
1061 1061 if operation is None:
1062 1062 operation = 'record'
1063 1063
1064 1064 def prompt(skipfile, skipall, query, chunk):
1065 1065 """prompt query, and process base inputs
1066 1066
1067 1067 - y/n for the rest of the file
1068 1068 - y/n for the rest
1069 1069 - ? (help)
1070 1070 - q (quit)
1071 1071
1072 1072 Return True/False and possibly updated skipfile and skipall.
1073 1073 """
1074 1074 newpatches = None
1075 1075 if skipall is not None:
1076 1076 return skipall, skipfile, skipall, newpatches
1077 1077 if skipfile is not None:
1078 1078 return skipfile, skipfile, skipall, newpatches
1079 1079 while True:
1080 1080 resps = messages['help'][operation]
1081 1081 r = ui.promptchoice("%s %s" % (query, resps))
1082 1082 ui.write("\n")
1083 1083 if r == 8: # ?
1084 1084 for c, t in ui.extractchoices(resps)[1]:
1085 1085 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1086 1086 continue
1087 1087 elif r == 0: # yes
1088 1088 ret = True
1089 1089 elif r == 1: # no
1090 1090 ret = False
1091 1091 elif r == 2: # Edit patch
1092 1092 if chunk is None:
1093 1093 ui.write(_('cannot edit patch for whole file'))
1094 1094 ui.write("\n")
1095 1095 continue
1096 1096 if chunk.header.binary():
1097 1097 ui.write(_('cannot edit patch for binary file'))
1098 1098 ui.write("\n")
1099 1099 continue
1100 1100 # Patch comment based on the Git one (see the comment at the end of
1101 1101 # https://mercurial-scm.org/wiki/RecordExtension)
1102 1102 phelp = '---' + _("""
1103 1103 To remove '-' lines, make them ' ' lines (context).
1104 1104 To remove '+' lines, delete them.
1105 1105 Lines starting with # will be removed from the patch.
1106 1106
1107 1107 If the patch applies cleanly, the edited hunk will immediately be
1108 1108 added to the record list. If it does not apply cleanly, a rejects
1109 1109 file will be generated: you can use that when you try again. If
1110 1110 all lines of the hunk are removed, then the edit is aborted and
1111 1111 the hunk is left unchanged.
1112 1112 """)
1113 1113 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1114 1114 suffix=".diff")
1115 1115 ncpatchfp = None
1116 1116 try:
1117 1117 # Write the initial patch
1118 1118 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1119 1119 chunk.header.write(f)
1120 1120 chunk.write(f)
1121 1121 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1122 1122 f.close()
1123 1123 # Start the editor and wait for it to complete
1124 1124 editor = ui.geteditor()
1125 1125 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1126 1126 environ={'HGUSER': ui.username()},
1127 1127 blockedtag='filterpatch')
1128 1128 if ret != 0:
1129 1129 ui.warn(_("editor exited with exit code %d\n") % ret)
1130 1130 continue
1131 1131 # Remove comment lines
1132 1132 patchfp = open(patchfn, r'rb')
1133 1133 ncpatchfp = stringio()
1134 1134 for line in util.iterfile(patchfp):
1135 1135 line = util.fromnativeeol(line)
1136 1136 if not line.startswith('#'):
1137 1137 ncpatchfp.write(line)
1138 1138 patchfp.close()
1139 1139 ncpatchfp.seek(0)
1140 1140 newpatches = parsepatch(ncpatchfp)
1141 1141 finally:
1142 1142 os.unlink(patchfn)
1143 1143 del ncpatchfp
1144 1144 # Signal that the chunk shouldn't be applied as-is, but
1145 1145 # provide the new patch to be used instead.
1146 1146 ret = False
1147 1147 elif r == 3: # Skip
1148 1148 ret = skipfile = False
1149 1149 elif r == 4: # file (Record remaining)
1150 1150 ret = skipfile = True
1151 1151 elif r == 5: # done, skip remaining
1152 1152 ret = skipall = False
1153 1153 elif r == 6: # all
1154 1154 ret = skipall = True
1155 1155 elif r == 7: # quit
1156 1156 raise error.Abort(_('user quit'))
1157 1157 return ret, skipfile, skipall, newpatches
1158 1158
1159 1159 seen = set()
1160 1160 applied = {} # 'filename' -> [] of chunks
1161 1161 skipfile, skipall = None, None
1162 1162 pos, total = 1, sum(len(h.hunks) for h in headers)
1163 1163 for h in headers:
1164 1164 pos += len(h.hunks)
1165 1165 skipfile = None
1166 1166 fixoffset = 0
1167 1167 hdr = ''.join(h.header)
1168 1168 if hdr in seen:
1169 1169 continue
1170 1170 seen.add(hdr)
1171 1171 if skipall is None:
1172 1172 h.pretty(ui)
1173 1173 msg = (_('examine changes to %s?') %
1174 1174 _(' and ').join("'%s'" % f for f in h.files()))
1175 1175 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1176 1176 if not r:
1177 1177 continue
1178 1178 applied[h.filename()] = [h]
1179 1179 if h.allhunks():
1180 1180 applied[h.filename()] += h.hunks
1181 1181 continue
1182 1182 for i, chunk in enumerate(h.hunks):
1183 1183 if skipfile is None and skipall is None:
1184 1184 chunk.pretty(ui)
1185 1185 if total == 1:
1186 1186 msg = messages['single'][operation] % chunk.filename()
1187 1187 else:
1188 1188 idx = pos - len(h.hunks) + i
1189 1189 msg = messages['multiple'][operation] % (idx, total,
1190 1190 chunk.filename())
1191 1191 r, skipfile, skipall, newpatches = prompt(skipfile,
1192 1192 skipall, msg, chunk)
1193 1193 if r:
1194 1194 if fixoffset:
1195 1195 chunk = copy.copy(chunk)
1196 1196 chunk.toline += fixoffset
1197 1197 applied[chunk.filename()].append(chunk)
1198 1198 elif newpatches is not None:
1199 1199 for newpatch in newpatches:
1200 1200 for newhunk in newpatch.hunks:
1201 1201 if fixoffset:
1202 1202 newhunk.toline += fixoffset
1203 1203 applied[newhunk.filename()].append(newhunk)
1204 1204 else:
1205 1205 fixoffset += chunk.removed - chunk.added
1206 1206 return (sum([h for h in applied.itervalues()
1207 1207 if h[0].special() or len(h) > 1], []), {})
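
# A condensed sketch (hypothetical helper mirroring, not replacing, the
# branches of prompt() above) of how the non-interactive [Ynsfda]
# answers map onto the (ret, skipfile, skipall) state driving the
# selection loop; edit (2), quit (7) and help (8) need real ui work and
# are left out:
def _demo_applyanswer(r, skipfile, skipall):
    if r == 0:                     # yes: take this hunk
        return True, skipfile, skipall
    if r == 1:                     # no: skip this hunk
        return False, skipfile, skipall
    if r == 3:                     # skip remaining changes to this file
        return False, False, skipall
    if r == 4:                     # take remaining changes to this file
        return True, True, skipall
    if r == 5:                     # done: skip everything left
        return False, skipfile, False
    if r == 6:                     # all: take everything left
        return True, skipfile, True
    raise ValueError('edit/quit/help are handled interactively')
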
1208 1208 class hunk(object):
1209 1209 def __init__(self, desc, num, lr, context):
1210 1210 self.number = num
1211 1211 self.desc = desc
1212 1212 self.hunk = [desc]
1213 1213 self.a = []
1214 1214 self.b = []
1215 1215 self.starta = self.lena = None
1216 1216 self.startb = self.lenb = None
1217 1217 if lr is not None:
1218 1218 if context:
1219 1219 self.read_context_hunk(lr)
1220 1220 else:
1221 1221 self.read_unified_hunk(lr)
1222 1222
1223 1223 def getnormalized(self):
1224 1224 """Return a copy with line endings normalized to LF."""
1225 1225
1226 1226 def normalize(lines):
1227 1227 nlines = []
1228 1228 for line in lines:
1229 1229 if line.endswith('\r\n'):
1230 1230 line = line[:-2] + '\n'
1231 1231 nlines.append(line)
1232 1232 return nlines
1233 1233
1234 1234 # Dummy object; it is rebuilt manually
1235 1235 nh = hunk(self.desc, self.number, None, None)
1236 1236 nh.number = self.number
1237 1237 nh.desc = self.desc
1238 1238 nh.hunk = self.hunk
1239 1239 nh.a = normalize(self.a)
1240 1240 nh.b = normalize(self.b)
1241 1241 nh.starta = self.starta
1242 1242 nh.startb = self.startb
1243 1243 nh.lena = self.lena
1244 1244 nh.lenb = self.lenb
1245 1245 return nh
1246 1246
1247 1247 def read_unified_hunk(self, lr):
1248 1248 m = unidesc.match(self.desc)
1249 1249 if not m:
1250 1250 raise PatchError(_("bad hunk #%d") % self.number)
1251 1251 self.starta, self.lena, self.startb, self.lenb = m.groups()
1252 1252 if self.lena is None:
1253 1253 self.lena = 1
1254 1254 else:
1255 1255 self.lena = int(self.lena)
1256 1256 if self.lenb is None:
1257 1257 self.lenb = 1
1258 1258 else:
1259 1259 self.lenb = int(self.lenb)
1260 1260 self.starta = int(self.starta)
1261 1261 self.startb = int(self.startb)
1262 1262 try:
1263 1263 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1264 1264 self.a, self.b)
1265 1265 except error.ParseError as e:
1266 1266 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1267 1267 # if we hit EOF before finishing out the hunk, the last line will
1268 1268 # be zero length. Let's try to fix it up.
1269 1269 while len(self.hunk[-1]) == 0:
1270 1270 del self.hunk[-1]
1271 1271 del self.a[-1]
1272 1272 del self.b[-1]
1273 1273 self.lena -= 1
1274 1274 self.lenb -= 1
1275 1275 self._fixnewline(lr)
1276 1276
1277 1277 def read_context_hunk(self, lr):
1278 1278 self.desc = lr.readline()
1279 1279 m = contextdesc.match(self.desc)
1280 1280 if not m:
1281 1281 raise PatchError(_("bad hunk #%d") % self.number)
1282 1282 self.starta, aend = m.groups()
1283 1283 self.starta = int(self.starta)
1284 1284 if aend is None:
1285 1285 aend = self.starta
1286 1286 self.lena = int(aend) - self.starta
1287 1287 if self.starta:
1288 1288 self.lena += 1
1289 1289 for x in xrange(self.lena):
1290 1290 l = lr.readline()
1291 1291 if l.startswith('---'):
1292 1292 # lines addition, old block is empty
1293 1293 lr.push(l)
1294 1294 break
1295 1295 s = l[2:]
1296 1296 if l.startswith('- ') or l.startswith('! '):
1297 1297 u = '-' + s
1298 1298 elif l.startswith(' '):
1299 1299 u = ' ' + s
1300 1300 else:
1301 1301 raise PatchError(_("bad hunk #%d old text line %d") %
1302 1302 (self.number, x))
1303 1303 self.a.append(u)
1304 1304 self.hunk.append(u)
1305 1305
1306 1306 l = lr.readline()
1307 1307 if l.startswith('\ '):
1308 1308 s = self.a[-1][:-1]
1309 1309 self.a[-1] = s
1310 1310 self.hunk[-1] = s
1311 1311 l = lr.readline()
1312 1312 m = contextdesc.match(l)
1313 1313 if not m:
1314 1314 raise PatchError(_("bad hunk #%d") % self.number)
1315 1315 self.startb, bend = m.groups()
1316 1316 self.startb = int(self.startb)
1317 1317 if bend is None:
1318 1318 bend = self.startb
1319 1319 self.lenb = int(bend) - self.startb
1320 1320 if self.startb:
1321 1321 self.lenb += 1
1322 1322 hunki = 1
1323 1323 for x in xrange(self.lenb):
1324 1324 l = lr.readline()
1325 1325 if l.startswith('\ '):
1326 1326 # XXX: the only way to hit this is with an invalid line range.
1327 1327 # The no-eol marker is not counted in the line range, but I guess
1328 1328 # there are diff(1) implementations out there which behave differently.
1329 1329 s = self.b[-1][:-1]
1330 1330 self.b[-1] = s
1331 1331 self.hunk[hunki - 1] = s
1332 1332 continue
1333 1333 if not l:
1334 1334 # line deletions, new block is empty and we hit EOF
1335 1335 lr.push(l)
1336 1336 break
1337 1337 s = l[2:]
1338 1338 if l.startswith('+ ') or l.startswith('! '):
1339 1339 u = '+' + s
1340 1340 elif l.startswith(' '):
1341 1341 u = ' ' + s
1342 1342 elif len(self.b) == 0:
1343 1343 # line deletions, new block is empty
1344 1344 lr.push(l)
1345 1345 break
1346 1346 else:
1347 1347 raise PatchError(_("bad hunk #%d new text line %d") %
1348 1348 (self.number, x))
1349 1349 self.b.append(s)
1350 1350 while True:
1351 1351 if hunki >= len(self.hunk):
1352 1352 h = ""
1353 1353 else:
1354 1354 h = self.hunk[hunki]
1355 1355 hunki += 1
1356 1356 if h == u:
1357 1357 break
1358 1358 elif h.startswith('-'):
1359 1359 continue
1360 1360 else:
1361 1361 self.hunk.insert(hunki - 1, u)
1362 1362 break
1363 1363
1364 1364 if not self.a:
1365 1365 # this happens when lines were only added to the hunk
1366 1366 for x in self.hunk:
1367 1367 if x.startswith('-') or x.startswith(' '):
1368 1368 self.a.append(x)
1369 1369 if not self.b:
1370 1370 # this happens when lines were only deleted from the hunk
1371 1371 for x in self.hunk:
1372 1372 if x.startswith('+') or x.startswith(' '):
1373 1373 self.b.append(x[1:])
1374 1374 # @@ -start,len +start,len @@
1375 1375 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1376 1376 self.startb, self.lenb)
1377 1377 self.hunk[0] = self.desc
1378 1378 self._fixnewline(lr)
1379 1379
1380 1380 def _fixnewline(self, lr):
1381 1381 l = lr.readline()
1382 1382 if l.startswith('\ '):
1383 1383 diffhelper.fixnewline(self.hunk, self.a, self.b)
1384 1384 else:
1385 1385 lr.push(l)
1386 1386
1387 1387 def complete(self):
1388 1388 return len(self.a) == self.lena and len(self.b) == self.lenb
1389 1389
1390 1390 def _fuzzit(self, old, new, fuzz, toponly):
1391 1391 # this removes context lines from the top and bottom of the 'old' and
1392 1392 # 'new' lists. It checks the hunk to make sure only context lines are
1393 1393 # removed, and then returns new shortened lists of lines.
1394 1394 fuzz = min(fuzz, len(old))
1395 1395 if fuzz:
1396 1396 top = 0
1397 1397 bot = 0
1398 1398 hlen = len(self.hunk)
1399 1399 for x in xrange(hlen - 1):
1400 1400 # the hunk starts with the @@ line, so use x+1
1401 1401 if self.hunk[x + 1].startswith(' '):
1402 1402 top += 1
1403 1403 else:
1404 1404 break
1405 1405 if not toponly:
1406 1406 for x in xrange(hlen - 1):
1407 1407 if self.hunk[hlen - bot - 1].startswith(' '):
1408 1408 bot += 1
1409 1409 else:
1410 1410 break
1411 1411
1412 1412 bot = min(fuzz, bot)
1413 1413 top = min(fuzz, top)
1414 1414 return old[top:len(old) - bot], new[top:len(new) - bot], top
1415 1415 return old, new, 0
1416 1416
1417 1417 def fuzzit(self, fuzz, toponly):
1418 1418 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1419 1419 oldstart = self.starta + top
1420 1420 newstart = self.startb + top
1421 1421 # zero length hunk ranges already have their start decremented
1422 1422 if self.lena and oldstart > 0:
1423 1423 oldstart -= 1
1424 1424 if self.lenb and newstart > 0:
1425 1425 newstart -= 1
1426 1426 return old, oldstart, new, newstart
1427 1427
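# Standalone sketch (hypothetical name; the real _fuzzit() above also
# scans the hunk to count its leading and trailing context lines,
# passed in here as 'top' and 'bot'): with fuzz factor f, at most f
# leading and f trailing context lines may be dropped from both sides,
# which is what lets a hunk apply when its surroundings have drifted.
def _demo_trimcontext(old, new, fuzz, top, bot):
    top = min(fuzz, top)
    bot = min(fuzz, bot)
    return old[top:len(old) - bot], new[top:len(new) - bot], top
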
1428 1428 class binhunk(object):
1429 1429 'A binary patch file.'
1430 1430 def __init__(self, lr, fname):
1431 1431 self.text = None
1432 1432 self.delta = False
1433 1433 self.hunk = ['GIT binary patch\n']
1434 1434 self._fname = fname
1435 1435 self._read(lr)
1436 1436
1437 1437 def complete(self):
1438 1438 return self.text is not None
1439 1439
1440 1440 def new(self, lines):
1441 1441 if self.delta:
1442 1442 return [applybindelta(self.text, ''.join(lines))]
1443 1443 return [self.text]
1444 1444
1445 1445 def _read(self, lr):
1446 1446 def getline(lr, hunk):
1447 1447 l = lr.readline()
1448 1448 hunk.append(l)
1449 1449 return l.rstrip('\r\n')
1450 1450
1451 1451 size = 0
1452 1452 while True:
1453 1453 line = getline(lr, self.hunk)
1454 1454 if not line:
1455 1455 raise PatchError(_('could not extract "%s" binary data')
1456 1456 % self._fname)
1457 1457 if line.startswith('literal '):
1458 1458 size = int(line[8:].rstrip())
1459 1459 break
1460 1460 if line.startswith('delta '):
1461 1461 size = int(line[6:].rstrip())
1462 1462 self.delta = True
1463 1463 break
1464 1464 dec = []
1465 1465 line = getline(lr, self.hunk)
1466 1466 while len(line) > 1:
1467 1467 l = line[0:1]
1468 1468 if l <= 'Z' and l >= 'A':
1469 1469 l = ord(l) - ord('A') + 1
1470 1470 else:
1471 1471 l = ord(l) - ord('a') + 27
1472 1472 try:
1473 1473 dec.append(util.b85decode(line[1:])[:l])
1474 1474 except ValueError as e:
1475 1475 raise PatchError(_('could not decode "%s" binary patch: %s')
1476 1476 % (self._fname, stringutil.forcebytestr(e)))
1477 1477 line = getline(lr, self.hunk)
1478 1478 text = zlib.decompress(''.join(dec))
1479 1479 if len(text) != size:
1480 1480 raise PatchError(_('"%s" length is %d bytes, should be %d')
1481 1481 % (self._fname, len(text), size))
1482 1482 self.text = text
1483 1483
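# Illustrative decoder (hypothetical demo name) for the length prefix
# read by binhunk._read() above: in a git binary patch, each data line
# starts with one character where 'A'..'Z' encode payload lengths
# 1..26 and 'a'..'z' encode 27..52; the rest of the line is base85
# data for that many bytes.
def _demo_linelength(c):
    if c >= 'A' and c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27

# e.g. _demo_linelength('A') == 1 and _demo_linelength('z') == 52
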
1484 1484 def parsefilename(str):
1485 1485 # --- filename \t|space stuff
1486 1486 s = str[4:].rstrip('\r\n')
1487 1487 i = s.find('\t')
1488 1488 if i < 0:
1489 1489 i = s.find(' ')
1490 1490 if i < 0:
1491 1491 return s
1492 1492 return s[:i]
1493 1493
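# Quick usage sketch (hypothetical demo name) for parsefilename()
# above: the name ends at the first tab, or failing that the first
# space, after the four-character '--- '/'+++ ' prefix.
def _demo_parsefilename():
    assert parsefilename('--- a/foo.c\tMon Jan 1 00:00:00 2018') == 'a/foo.c'
    assert parsefilename('+++ b/bar.c') == 'b/bar.c'
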
1494 1494 def reversehunks(hunks):
1495 1495 '''reverse the signs in the hunks given as argument
1496 1496
1497 1497 This function operates on hunks coming out of patch.filterpatch, that is
1498 1498 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1499 1499
1500 1500 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1501 1501 ... --- a/folder1/g
1502 1502 ... +++ b/folder1/g
1503 1503 ... @@ -1,7 +1,7 @@
1504 1504 ... +firstline
1505 1505 ... c
1506 1506 ... 1
1507 1507 ... 2
1508 1508 ... + 3
1509 1509 ... -4
1510 1510 ... 5
1511 1511 ... d
1512 1512 ... +lastline"""
1513 1513 >>> hunks = parsepatch([rawpatch])
1514 1514 >>> hunkscomingfromfilterpatch = []
1515 1515 >>> for h in hunks:
1516 1516 ... hunkscomingfromfilterpatch.append(h)
1517 1517 ... hunkscomingfromfilterpatch.extend(h.hunks)
1518 1518
1519 1519 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1520 1520 >>> from . import util
1521 1521 >>> fp = util.stringio()
1522 1522 >>> for c in reversedhunks:
1523 1523 ... c.write(fp)
1524 1524 >>> fp.seek(0) or None
1525 1525 >>> reversedpatch = fp.read()
1526 1526 >>> print(pycompat.sysstr(reversedpatch))
1527 1527 diff --git a/folder1/g b/folder1/g
1528 1528 --- a/folder1/g
1529 1529 +++ b/folder1/g
1530 1530 @@ -1,4 +1,3 @@
1531 1531 -firstline
1532 1532 c
1533 1533 1
1534 1534 2
1535 1535 @@ -2,6 +1,6 @@
1536 1536 c
1537 1537 1
1538 1538 2
1539 1539 - 3
1540 1540 +4
1541 1541 5
1542 1542 d
1543 1543 @@ -6,3 +5,2 @@
1544 1544 5
1545 1545 d
1546 1546 -lastline
1547 1547
1548 1548 '''
1549 1549
1550 1550 newhunks = []
1551 1551 for c in hunks:
1552 1552 if util.safehasattr(c, 'reversehunk'):
1553 1553 c = c.reversehunk()
1554 1554 newhunks.append(c)
1555 1555 return newhunks
1556 1556
1557 1557 def parsepatch(originalchunks, maxcontext=None):
1558 1558 """patch -> [] of headers -> [] of hunks
1559 1559
1560 1560 If maxcontext is not None, trim context lines if necessary.
1561 1561
1562 1562 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1563 1563 ... --- a/folder1/g
1564 1564 ... +++ b/folder1/g
1565 1565 ... @@ -1,8 +1,10 @@
1566 1566 ... 1
1567 1567 ... 2
1568 1568 ... -3
1569 1569 ... 4
1570 1570 ... 5
1571 1571 ... 6
1572 1572 ... +6.1
1573 1573 ... +6.2
1574 1574 ... 7
1575 1575 ... 8
1576 1576 ... +9'''
1577 1577 >>> out = util.stringio()
1578 1578 >>> headers = parsepatch([rawpatch], maxcontext=1)
1579 1579 >>> for header in headers:
1580 1580 ... header.write(out)
1581 1581 ... for hunk in header.hunks:
1582 1582 ... hunk.write(out)
1583 1583 >>> print(pycompat.sysstr(out.getvalue()))
1584 1584 diff --git a/folder1/g b/folder1/g
1585 1585 --- a/folder1/g
1586 1586 +++ b/folder1/g
1587 1587 @@ -2,3 +2,2 @@
1588 1588 2
1589 1589 -3
1590 1590 4
1591 1591 @@ -6,2 +5,4 @@
1592 1592 6
1593 1593 +6.1
1594 1594 +6.2
1595 1595 7
1596 1596 @@ -8,1 +9,2 @@
1597 1597 8
1598 1598 +9
1599 1599 """
1600 1600 class parser(object):
1601 1601 """patch parsing state machine"""
1602 1602 def __init__(self):
1603 1603 self.fromline = 0
1604 1604 self.toline = 0
1605 1605 self.proc = ''
1606 1606 self.header = None
1607 1607 self.context = []
1608 1608 self.before = []
1609 1609 self.hunk = []
1610 1610 self.headers = []
1611 1611
1612 1612 def addrange(self, limits):
1613 1613 fromstart, fromend, tostart, toend, proc = limits
1614 1614 self.fromline = int(fromstart)
1615 1615 self.toline = int(tostart)
1616 1616 self.proc = proc
1617 1617
1618 1618 def addcontext(self, context):
1619 1619 if self.hunk:
1620 1620 h = recordhunk(self.header, self.fromline, self.toline,
1621 1621 self.proc, self.before, self.hunk, context, maxcontext)
1622 1622 self.header.hunks.append(h)
1623 1623 self.fromline += len(self.before) + h.removed
1624 1624 self.toline += len(self.before) + h.added
1625 1625 self.before = []
1626 1626 self.hunk = []
1627 1627 self.context = context
1628 1628
1629 1629 def addhunk(self, hunk):
1630 1630 if self.context:
1631 1631 self.before = self.context
1632 1632 self.context = []
1633 1633 self.hunk = hunk
1634 1634
1635 1635 def newfile(self, hdr):
1636 1636 self.addcontext([])
1637 1637 h = header(hdr)
1638 1638 self.headers.append(h)
1639 1639 self.header = h
1640 1640
1641 1641 def addother(self, line):
1642 1642 pass # 'other' lines are ignored
1643 1643
1644 1644 def finished(self):
1645 1645 self.addcontext([])
1646 1646 return self.headers
1647 1647
1648 1648 transitions = {
1649 1649 'file': {'context': addcontext,
1650 1650 'file': newfile,
1651 1651 'hunk': addhunk,
1652 1652 'range': addrange},
1653 1653 'context': {'file': newfile,
1654 1654 'hunk': addhunk,
1655 1655 'range': addrange,
1656 1656 'other': addother},
1657 1657 'hunk': {'context': addcontext,
1658 1658 'file': newfile,
1659 1659 'range': addrange},
1660 1660 'range': {'context': addcontext,
1661 1661 'hunk': addhunk},
1662 1662 'other': {'other': addother},
1663 1663 }
1664 1664
1665 1665 p = parser()
1666 1666 fp = stringio()
1667 1667 fp.write(''.join(originalchunks))
1668 1668 fp.seek(0)
1669 1669
1670 1670 state = 'context'
1671 1671 for newstate, data in scanpatch(fp):
1672 1672 try:
1673 1673 p.transitions[state][newstate](p, data)
1674 1674 except KeyError:
1675 1675 raise PatchError('unhandled transition: %s -> %s' %
1676 1676 (state, newstate))
1677 1677 state = newstate
1678 1678 del fp
1679 1679 return p.finished()
1680 1680
1681 1681 def pathtransform(path, strip, prefix):
1682 1682 '''turn a path from a patch into a path suitable for the repository
1683 1683
1684 1684 prefix, if not empty, is expected to be normalized with a / at the end.
1685 1685
1686 1686 Returns (stripped components, path in repository).
1687 1687
1688 1688 >>> pathtransform(b'a/b/c', 0, b'')
1689 1689 ('', 'a/b/c')
1690 1690 >>> pathtransform(b' a/b/c ', 0, b'')
1691 1691 ('', ' a/b/c')
1692 1692 >>> pathtransform(b' a/b/c ', 2, b'')
1693 1693 ('a/b/', 'c')
1694 1694 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1695 1695 ('', 'd/e/a/b/c')
1696 1696 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1697 1697 ('a//b/', 'd/e/c')
1698 1698 >>> pathtransform(b'a/b/c', 3, b'')
1699 1699 Traceback (most recent call last):
1700 1700 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1701 1701 '''
1702 1702 pathlen = len(path)
1703 1703 i = 0
1704 1704 if strip == 0:
1705 1705 return '', prefix + path.rstrip()
1706 1706 count = strip
1707 1707 while count > 0:
1708 1708 i = path.find('/', i)
1709 1709 if i == -1:
1710 1710 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1711 1711 (count, strip, path))
1712 1712 i += 1
1713 1713 # consume '//' in the path
1714 1714 while i < pathlen - 1 and path[i:i + 1] == '/':
1715 1715 i += 1
1716 1716 count -= 1
1717 1717 return path[:i].lstrip(), prefix + path[i:].rstrip()
1718 1718
1719 1719 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1720 1720 nulla = afile_orig == "/dev/null"
1721 1721 nullb = bfile_orig == "/dev/null"
1722 1722 create = nulla and hunk.starta == 0 and hunk.lena == 0
1723 1723 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1724 1724 abase, afile = pathtransform(afile_orig, strip, prefix)
1725 1725 gooda = not nulla and backend.exists(afile)
1726 1726 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1727 1727 if afile == bfile:
1728 1728 goodb = gooda
1729 1729 else:
1730 1730 goodb = not nullb and backend.exists(bfile)
1731 1731 missing = not goodb and not gooda and not create
1732 1732
1733 1733 # some diff programs apparently produce patches where the afile is
1734 1734 # not /dev/null, but afile starts with bfile
1735 1735 abasedir = afile[:afile.rfind('/') + 1]
1736 1736 bbasedir = bfile[:bfile.rfind('/') + 1]
1737 1737 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1738 1738 and hunk.starta == 0 and hunk.lena == 0):
1739 1739 create = True
1740 1740 missing = False
1741 1741
1742 1742 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1743 1743 # diff is between a file and its backup. In this case, the original
1744 1744 # file should be patched (see original mpatch code).
1745 1745 isbackup = (abase == bbase and bfile.startswith(afile))
1746 1746 fname = None
1747 1747 if not missing:
1748 1748 if gooda and goodb:
1749 1749 if isbackup:
1750 1750 fname = afile
1751 1751 else:
1752 1752 fname = bfile
1753 1753 elif gooda:
1754 1754 fname = afile
1755 1755
1756 1756 if not fname:
1757 1757 if not nullb:
1758 1758 if isbackup:
1759 1759 fname = afile
1760 1760 else:
1761 1761 fname = bfile
1762 1762 elif not nulla:
1763 1763 fname = afile
1764 1764 else:
1765 1765 raise PatchError(_("undefined source and destination files"))
1766 1766
1767 1767 gp = patchmeta(fname)
1768 1768 if create:
1769 1769 gp.op = 'ADD'
1770 1770 elif remove:
1771 1771 gp.op = 'DELETE'
1772 1772 return gp
1773 1773
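# Minimal sketch (hypothetical helper) of the creation/removal
# detection in makepatchmeta() above: a hunk creates a file when the
# old side is /dev/null with an empty zero-based range, and deletes
# one in the symmetric case on the new side.
def _demo_patchop(afile, starta, lena, bfile, startb, lenb):
    if afile == '/dev/null' and starta == 0 and lena == 0:
        return 'ADD'
    if bfile == '/dev/null' and startb == 0 and lenb == 0:
        return 'DELETE'
    return None                    # plain modification, no special op
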
1774 1774 def scanpatch(fp):
1775 1775 """like patch.iterhunks, but yield different events
1776 1776
1777 1777 - ('file', [header_lines + fromfile + tofile])
1778 1778 - ('context', [context_lines])
1779 1779 - ('hunk', [hunk_lines])
1780 1780 - ('range', (-start,len, +start,len, proc))
1781 1781 """
1782 1782 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1783 1783 lr = linereader(fp)
1784 1784
1785 1785 def scanwhile(first, p):
1786 1786 """scan lr while predicate holds"""
1787 1787 lines = [first]
1788 1788 for line in iter(lr.readline, ''):
1789 1789 if p(line):
1790 1790 lines.append(line)
1791 1791 else:
1792 1792 lr.push(line)
1793 1793 break
1794 1794 return lines
1795 1795
1796 1796 for line in iter(lr.readline, ''):
1797 1797 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1798 1798 def notheader(line):
1799 1799 s = line.split(None, 1)
1800 1800 return not s or s[0] not in ('---', 'diff')
1801 1801 header = scanwhile(line, notheader)
1802 1802 fromfile = lr.readline()
1803 1803 if fromfile.startswith('---'):
1804 1804 tofile = lr.readline()
1805 1805 header += [fromfile, tofile]
1806 1806 else:
1807 1807 lr.push(fromfile)
1808 1808 yield 'file', header
1809 1809 elif line.startswith(' '):
1810 1810 cs = (' ', '\\')
1811 1811 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1812 1812 elif line.startswith(('-', '+')):
1813 1813 cs = ('-', '+', '\\')
1814 1814 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1815 1815 else:
1816 1816 m = lines_re.match(line)
1817 1817 if m:
1818 1818 yield 'range', m.groups()
1819 1819 else:
1820 1820 yield 'other', line
1821 1821
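# Usage sketch (hypothetical demo name) for scanpatch() above; a
# single-hunk git patch yields the events 'file', 'range' and 'hunk',
# in that order.
def _demo_scanevents(patchtext):
    fp = stringio()
    fp.write(patchtext)
    fp.seek(0)
    return [event for event, data in scanpatch(fp)]
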
1822 1822 def scangitpatch(lr, firstline):
1823 1823 """
1824 1824 Git patches can emit:
1825 1825 - rename a to b
1826 1826 - change b
1827 1827 - copy a to c
1828 1828 - change c
1829 1829
1830 1830 We cannot apply this sequence as-is: the renamed 'a' could not be
1831 1831 found, since it would have been renamed already. And we cannot copy
1832 1832 from 'b' instead because 'b' would have been changed already. So
1833 1833 we scan the git patch for copy and rename commands so we can
1834 1834 perform the copies ahead of time.
1835 1835 """
1836 1836 pos = 0
1837 1837 try:
1838 1838 pos = lr.fp.tell()
1839 1839 fp = lr.fp
1840 1840 except IOError:
1841 1841 fp = stringio(lr.fp.read())
1842 1842 gitlr = linereader(fp)
1843 1843 gitlr.push(firstline)
1844 1844 gitpatches = readgitpatch(gitlr)
1845 1845 fp.seek(pos)
1846 1846 return gitpatches
1847 1847
1848 1848 def iterhunks(fp):
1849 1849 """Read a patch and yield the following events:
1850 1850 - ("file", afile, bfile, firsthunk): select a new target file.
1851 1851 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1852 1852 "file" event.
1853 1853 - ("git", gitchanges): current diff is in git format, gitchanges
1854 1854 maps filenames to gitpatch records. Unique event.
1855 1855 """
1856 1856 afile = ""
1857 1857 bfile = ""
1858 1858 state = None
1859 1859 hunknum = 0
1860 1860 emitfile = newfile = False
1861 1861 gitpatches = None
1862 1862
1863 1863 # our states
1864 1864 BFILE = 1
1865 1865 context = None
1866 1866 lr = linereader(fp)
1867 1867
1868 1868 for x in iter(lr.readline, ''):
1869 1869 if state == BFILE and (
1870 1870 (not context and x.startswith('@'))
1871 1871 or (context is not False and x.startswith('***************'))
1872 1872 or x.startswith('GIT binary patch')):
1873 1873 gp = None
1874 1874 if (gitpatches and
1875 1875 gitpatches[-1].ispatching(afile, bfile)):
1876 1876 gp = gitpatches.pop()
1877 1877 if x.startswith('GIT binary patch'):
1878 1878 h = binhunk(lr, gp.path)
1879 1879 else:
1880 1880 if context is None and x.startswith('***************'):
1881 1881 context = True
1882 1882 h = hunk(x, hunknum + 1, lr, context)
1883 1883 hunknum += 1
1884 1884 if emitfile:
1885 1885 emitfile = False
1886 1886 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1887 1887 yield 'hunk', h
1888 1888 elif x.startswith('diff --git a/'):
1889 1889 m = gitre.match(x.rstrip(' \r\n'))
1890 1890 if not m:
1891 1891 continue
1892 1892 if gitpatches is None:
1893 1893 # scan whole input for git metadata
1894 1894 gitpatches = scangitpatch(lr, x)
1895 1895 yield 'git', [g.copy() for g in gitpatches
1896 1896 if g.op in ('COPY', 'RENAME')]
1897 1897 gitpatches.reverse()
1898 1898 afile = 'a/' + m.group(1)
1899 1899 bfile = 'b/' + m.group(2)
1900 1900 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1901 1901 gp = gitpatches.pop()
1902 1902 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1903 1903 if not gitpatches:
1904 1904 raise PatchError(_('failed to synchronize metadata for "%s"')
1905 1905 % afile[2:])
1906 1906 gp = gitpatches[-1]
1907 1907 newfile = True
1908 1908 elif x.startswith('---'):
1909 1909 # check for a unified diff
1910 1910 l2 = lr.readline()
1911 1911 if not l2.startswith('+++'):
1912 1912 lr.push(l2)
1913 1913 continue
1914 1914 newfile = True
1915 1915 context = False
1916 1916 afile = parsefilename(x)
1917 1917 bfile = parsefilename(l2)
1918 1918 elif x.startswith('***'):
1919 1919 # check for a context diff
1920 1920 l2 = lr.readline()
1921 1921 if not l2.startswith('---'):
1922 1922 lr.push(l2)
1923 1923 continue
1924 1924 l3 = lr.readline()
1925 1925 lr.push(l3)
1926 1926 if not l3.startswith("***************"):
1927 1927 lr.push(l2)
1928 1928 continue
1929 1929 newfile = True
1930 1930 context = True
1931 1931 afile = parsefilename(x)
1932 1932 bfile = parsefilename(l2)
1933 1933
1934 1934 if newfile:
1935 1935 newfile = False
1936 1936 emitfile = True
1937 1937 state = BFILE
1938 1938 hunknum = 0
1939 1939
1940 1940 while gitpatches:
1941 1941 gp = gitpatches.pop()
1942 1942 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1943 1943
1944 1944 def applybindelta(binchunk, data):
1945 1945 """Apply a binary delta hunk
1946 1946 The algorithm used is the algorithm from git's patch-delta.c
1947 1947 """
1948 1948 def deltahead(binchunk):
1949 1949 i = 0
1950 1950 for c in pycompat.bytestr(binchunk):
1951 1951 i += 1
1952 1952 if not (ord(c) & 0x80):
1953 1953 return i
1954 1954 return i
1955 1955 out = ""
1956 1956 s = deltahead(binchunk)
1957 1957 binchunk = binchunk[s:]
1958 1958 s = deltahead(binchunk)
1959 1959 binchunk = binchunk[s:]
1960 1960 i = 0
1961 1961 while i < len(binchunk):
1962 1962 cmd = ord(binchunk[i:i + 1])
1963 1963 i += 1
1964 1964 if (cmd & 0x80):
1965 1965 offset = 0
1966 1966 size = 0
1967 1967 if (cmd & 0x01):
1968 1968 offset = ord(binchunk[i:i + 1])
1969 1969 i += 1
1970 1970 if (cmd & 0x02):
1971 1971 offset |= ord(binchunk[i:i + 1]) << 8
1972 1972 i += 1
1973 1973 if (cmd & 0x04):
1974 1974 offset |= ord(binchunk[i:i + 1]) << 16
1975 1975 i += 1
1976 1976 if (cmd & 0x08):
1977 1977 offset |= ord(binchunk[i:i + 1]) << 24
1978 1978 i += 1
1979 1979 if (cmd & 0x10):
1980 1980 size = ord(binchunk[i:i + 1])
1981 1981 i += 1
1982 1982 if (cmd & 0x20):
1983 1983 size |= ord(binchunk[i:i + 1]) << 8
1984 1984 i += 1
1985 1985 if (cmd & 0x40):
1986 1986 size |= ord(binchunk[i:i + 1]) << 16
1987 1987 i += 1
1988 1988 if size == 0:
1989 1989 size = 0x10000
1990 1990 offset_end = offset + size
1991 1991 out += data[offset:offset_end]
1992 1992 elif cmd != 0:
1993 1993 offset_end = i + cmd
1994 1994 out += binchunk[i:offset_end]
1995 1995 i += cmd
1996 1996 else:
1997 1997 raise PatchError(_('unexpected delta opcode 0'))
1998 1998 return out
1999 1999
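# Worked example (hypothetical demo name) for applybindelta() above,
# following git's delta encoding: two varint sizes (source, then
# target), followed by opcodes. 0x91 is a copy command with the
# one-byte-offset (0x01) and one-byte-size (0x10) flags set, so it
# copies 5 bytes from offset 0 of the source data.
def _demo_bindelta():
    delta = '\x0b\x05\x91\x00\x05'  # src len 11, dst len 5, copy(0, 5)
    assert applybindelta(delta, 'hello world') == 'hello'
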
2000 2000 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2001 2001 """Reads a patch from fp and tries to apply it.
2002 2002
2003 2003 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2004 2004 there was any fuzz.
2005 2005
2006 2006 If 'eolmode' is 'strict', the patch content and patched file are
2007 2007 read in binary mode. Otherwise, line endings are ignored when
2008 2008 patching then normalized according to 'eolmode'.
2009 2009 """
2010 2010 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2011 2011 prefix=prefix, eolmode=eolmode)
2012 2012
2013 2013 def _canonprefix(repo, prefix):
2014 2014 if prefix:
2015 2015 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2016 2016 if prefix != '':
2017 2017 prefix += '/'
2018 2018 return prefix
2019 2019
2020 2020 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2021 2021 eolmode='strict'):
2022 2022 prefix = _canonprefix(backend.repo, prefix)
2023 2023 def pstrip(p):
2024 2024 return pathtransform(p, strip - 1, prefix)[1]
2025 2025
2026 2026 rejects = 0
2027 2027 err = 0
2028 2028 current_file = None
2029 2029
2030 2030 for state, values in iterhunks(fp):
2031 2031 if state == 'hunk':
2032 2032 if not current_file:
2033 2033 continue
2034 2034 ret = current_file.apply(values)
2035 2035 if ret > 0:
2036 2036 err = 1
2037 2037 elif state == 'file':
2038 2038 if current_file:
2039 2039 rejects += current_file.close()
2040 2040 current_file = None
2041 2041 afile, bfile, first_hunk, gp = values
2042 2042 if gp:
2043 2043 gp.path = pstrip(gp.path)
2044 2044 if gp.oldpath:
2045 2045 gp.oldpath = pstrip(gp.oldpath)
2046 2046 else:
2047 2047 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2048 2048 prefix)
2049 2049 if gp.op == 'RENAME':
2050 2050 backend.unlink(gp.oldpath)
2051 2051 if not first_hunk:
2052 2052 if gp.op == 'DELETE':
2053 2053 backend.unlink(gp.path)
2054 2054 continue
2055 2055 data, mode = None, None
2056 2056 if gp.op in ('RENAME', 'COPY'):
2057 2057 data, mode = store.getfile(gp.oldpath)[:2]
2058 2058 if data is None:
2059 2059 # This means that the old path does not exist
2060 2060 raise PatchError(_("source file '%s' does not exist")
2061 2061 % gp.oldpath)
2062 2062 if gp.mode:
2063 2063 mode = gp.mode
2064 2064 if gp.op == 'ADD':
2065 2065 # Added files without content have no hunk and
2066 2066 # must be created
2067 2067 data = ''
2068 2068 if data or mode:
2069 2069 if (gp.op in ('ADD', 'RENAME', 'COPY')
2070 2070 and backend.exists(gp.path)):
2071 2071 raise PatchError(_("cannot create %s: destination "
2072 2072 "already exists") % gp.path)
2073 2073 backend.setfile(gp.path, data, mode, gp.oldpath)
2074 2074 continue
2075 2075 try:
2076 2076 current_file = patcher(ui, gp, backend, store,
2077 2077 eolmode=eolmode)
2078 2078 except PatchError as inst:
2079 2079 ui.warn(str(inst) + '\n')
2080 2080 current_file = None
2081 2081 rejects += 1
2082 2082 continue
2083 2083 elif state == 'git':
2084 2084 for gp in values:
2085 2085 path = pstrip(gp.oldpath)
2086 2086 data, mode = backend.getfile(path)
2087 2087 if data is None:
2088 2088 # The error ignored here will trigger a getfile()
2089 2089 # error in a place more appropriate for error
2090 2090 # handling, and will not interrupt the patching
2091 2091 # process.
2092 2092 pass
2093 2093 else:
2094 2094 store.setfile(path, data, mode)
2095 2095 else:
2096 2096 raise error.Abort(_('unsupported parser state: %s') % state)
2097 2097
2098 2098 if current_file:
2099 2099 rejects += current_file.close()
2100 2100
2101 2101 if rejects:
2102 2102 return -1
2103 2103 return err
2104 2104
2105 2105 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2106 2106 similarity):
2107 2107 """use <patcher> to apply <patchname> to the working directory.
2108 2108 returns whether patch was applied with fuzz factor."""
2109 2109
2110 2110 fuzz = False
2111 2111 args = []
2112 2112 cwd = repo.root
2113 2113 if cwd:
2114 2114 args.append('-d %s' % procutil.shellquote(cwd))
2115 2115 cmd = ('%s %s -p%d < %s'
2116 2116 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2117 2117 ui.debug('Using external patch tool: %s\n' % cmd)
2118 2118 fp = procutil.popen(cmd, 'rb')
2119 2119 try:
2120 2120 for line in util.iterfile(fp):
2121 2121 line = line.rstrip()
2122 2122 ui.note(line + '\n')
2123 2123 if line.startswith('patching file '):
2124 2124 pf = util.parsepatchoutput(line)
2125 2125 printed_file = False
2126 2126 files.add(pf)
2127 2127 elif line.find('with fuzz') >= 0:
2128 2128 fuzz = True
2129 2129 if not printed_file:
2130 2130 ui.warn(pf + '\n')
2131 2131 printed_file = True
2132 2132 ui.warn(line + '\n')
2133 2133 elif line.find('saving rejects to file') >= 0:
2134 2134 ui.warn(line + '\n')
2135 2135 elif line.find('FAILED') >= 0:
2136 2136 if not printed_file:
2137 2137 ui.warn(pf + '\n')
2138 2138 printed_file = True
2139 2139 ui.warn(line + '\n')
2140 2140 finally:
2141 2141 if files:
2142 2142 scmutil.marktouched(repo, files, similarity)
2143 2143 code = fp.close()
2144 2144 if code:
2145 2145 raise PatchError(_("patch command failed: %s") %
2146 2146 procutil.explainexit(code))
2147 2147 return fuzz
2148 2148
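# Sketch (hypothetical demo name) of the shell command _externalpatch()
# assembles above; with patcher='patch', root='/repo', strip=1 and
# patchname='fix.diff' the result is roughly
# "patch -d /repo -p1 < fix.diff", modulo shell quoting.
def _demo_patchcommand(patcher, root, strip, patchname):
    args = []
    if root:
        args.append('-d %s' % procutil.shellquote(root))
    return ('%s %s -p%d < %s'
            % (patcher, ' '.join(args), strip,
               procutil.shellquote(patchname)))
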
2149 2149 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2150 2150 eolmode='strict'):
2151 2151 if files is None:
2152 2152 files = set()
2153 2153 if eolmode is None:
2154 2154 eolmode = ui.config('patch', 'eol')
2155 2155 if eolmode.lower() not in eolmodes:
2156 2156 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2157 2157 eolmode = eolmode.lower()
2158 2158
2159 2159 store = filestore()
2160 2160 try:
2161 2161 fp = open(patchobj, 'rb')
2162 2162 except TypeError:
2163 2163 fp = patchobj
2164 2164 try:
2165 2165 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2166 2166 eolmode=eolmode)
2167 2167 finally:
2168 2168 if fp != patchobj:
2169 2169 fp.close()
2170 2170 files.update(backend.close())
2171 2171 store.close()
2172 2172 if ret < 0:
2173 2173 raise PatchError(_('patch failed to apply'))
2174 2174 return ret > 0
2175 2175
2176 2176 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2177 2177 eolmode='strict', similarity=0):
2178 2178 """use builtin patch to apply <patchobj> to the working directory.
2179 2179 returns whether patch was applied with fuzz factor."""
2180 2180 backend = workingbackend(ui, repo, similarity)
2181 2181 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2182 2182
2183 2183 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2184 2184 eolmode='strict'):
2185 2185 backend = repobackend(ui, repo, ctx, store)
2186 2186 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2187 2187
2188 2188 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2189 2189 similarity=0):
2190 2190 """Apply <patchname> to the working directory.
2191 2191
2192 2192 'eolmode' specifies how end of lines should be handled. It can be:
2193 2193 - 'strict': inputs are read in binary mode, EOLs are preserved
2194 2194 - 'crlf': EOLs are ignored when patching and reset to CRLF
2195 2195 - 'lf': EOLs are ignored when patching and reset to LF
2196 2196 - None: get it from user settings, default to 'strict'
2197 2197 'eolmode' is ignored when using an external patcher program.
2198 2198
2199 2199 Returns whether patch was applied with fuzz factor.
2200 2200 """
2201 2201 patcher = ui.config('ui', 'patch')
2202 2202 if files is None:
2203 2203 files = set()
2204 2204 if patcher:
2205 2205 return _externalpatch(ui, repo, patcher, patchname, strip,
2206 2206 files, similarity)
2207 2207 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2208 2208 similarity)
2209 2209
2210 2210 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2211 2211 backend = fsbackend(ui, repo.root)
2212 2212 prefix = _canonprefix(repo, prefix)
2213 2213 with open(patchpath, 'rb') as fp:
2214 2214 changed = set()
2215 2215 for state, values in iterhunks(fp):
2216 2216 if state == 'file':
2217 2217 afile, bfile, first_hunk, gp = values
2218 2218 if gp:
2219 2219 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2220 2220 if gp.oldpath:
2221 2221 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2222 2222 prefix)[1]
2223 2223 else:
2224 2224 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2225 2225 prefix)
2226 2226 changed.add(gp.path)
2227 2227 if gp.op == 'RENAME':
2228 2228 changed.add(gp.oldpath)
2229 2229 elif state not in ('hunk', 'git'):
2230 2230 raise error.Abort(_('unsupported parser state: %s') % state)
2231 2231 return changed
2232 2232
2233 2233 class GitDiffRequired(Exception):
2234 2234 pass
2235 2235
2236 2236 diffopts = diffutil.diffallopts
2237 2237 diffallopts = diffutil.diffallopts
2238 2238 difffeatureopts = diffutil.difffeatureopts
2239 2239
2240 2240 def diff(repo, node1=None, node2=None, match=None, changes=None,
2241 2241 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2242 2242 hunksfilterfn=None):
2243 2243 '''yields diff of changes to files between two nodes, or node and
2244 2244 working directory.
2245 2245
2246 2246 if node1 is None, use first dirstate parent instead.
2247 2247 if node2 is None, compare node1 with working directory.
2248 2248
2249 2249 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2250 2250 every time some change cannot be represented with the current
2251 2251 patch format. Return False to upgrade to git patch format, True to
2252 2252 accept the loss or raise an exception to abort the diff. It is
2253 2253 called with the name of the current file being diffed as 'fn'. If set
2254 2254 to None, patches will always be upgraded to git format when
2255 2255 necessary.
2256 2256
2257 2257 prefix is a filename prefix that is prepended to all filenames on
2258 2258 display (used for subrepos).
2259 2259
2260 2260 relroot, if not empty, must be normalized with a trailing /. Any match
2261 2261 patterns that fall outside it will be ignored.
2262 2262
2263 2263 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2264 2264 information.
2265 2265
2266 2266 hunksfilterfn, if not None, should be a function taking a filectx and
2267 2267 hunks generator that may yield filtered hunks.
2268 2268 '''
2269 2269 for fctx1, fctx2, hdr, hunks in diffhunks(
2270 2270 repo, node1=node1, node2=node2,
2271 2271 match=match, changes=changes, opts=opts,
2272 2272 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2273 2273 ):
2274 2274 if hunksfilterfn is not None:
2275 2275 # If the file has been removed, fctx2 is None; but this should
2276 2276 # not occur here since we catch removed files early in
2277 2277 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2278 2278 assert fctx2 is not None, \
2279 2279 'fctx2 unexpectedly None in diff hunks filtering'
2280 2280 hunks = hunksfilterfn(fctx2, hunks)
2281 2281 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2282 2282 if hdr and (text or len(hdr) > 1):
2283 2283 yield '\n'.join(hdr) + '\n'
2284 2284 if text:
2285 2285 yield text
2286 2286
2287 2287 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2288 2288 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2289 2289 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2290 2290 where `header` is a list of diff headers and `hunks` is an iterable of
2291 2291 (`hunkrange`, `hunklines`) tuples.
2292 2292
2293 2293 See diff() for the meaning of parameters.
2294 2294 """
2295 2295
2296 2296 if opts is None:
2297 2297 opts = mdiff.defaultopts
2298 2298
2299 2299 if not node1 and not node2:
2300 2300 node1 = repo.dirstate.p1()
2301 2301
2302 2302 def lrugetfilectx():
2303 2303 cache = {}
2304 2304 order = collections.deque()
2305 2305 def getfilectx(f, ctx):
2306 2306 fctx = ctx.filectx(f, filelog=cache.get(f))
2307 2307 if f not in cache:
2308 2308 if len(cache) > 20:
2309 2309 del cache[order.popleft()]
2310 2310 cache[f] = fctx.filelog()
2311 2311 else:
2312 2312 order.remove(f)
2313 2313 order.append(f)
2314 2314 return fctx
2315 2315 return getfilectx
2316 2316 getfilectx = lrugetfilectx()
2317 2317
2318 2318 ctx1 = repo[node1]
2319 2319 ctx2 = repo[node2]
2320 2320
2321 2321 relfiltered = False
2322 2322 if relroot != '' and match.always():
2323 2323 # as a special case, create a new matcher with just the relroot
2324 2324 pats = [relroot]
2325 2325 match = scmutil.match(ctx2, pats, default='path')
2326 2326 relfiltered = True
2327 2327
2328 2328 if not changes:
2329 2329 changes = repo.status(ctx1, ctx2, match=match)
2330 2330 modified, added, removed = changes[:3]
2331 2331
2332 2332 if not modified and not added and not removed:
2333 2333 return []
2334 2334
2335 2335 if repo.ui.debugflag:
2336 2336 hexfunc = hex
2337 2337 else:
2338 2338 hexfunc = short
2339 2339 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2340 2340
2341 2341 if copy is None:
2342 2342 copy = {}
2343 2343 if opts.git or opts.upgrade:
2344 2344 copy = copies.pathcopies(ctx1, ctx2, match=match)
2345 2345
2346 2346 if relroot is not None:
2347 2347 if not relfiltered:
2348 2348 # XXX this would ideally be done in the matcher, but that is
2349 2349 # generally meant to 'or' patterns, not 'and' them. In this case we
2350 2350 # need to 'and' all the patterns from the matcher with relroot.
2351 2351 def filterrel(l):
2352 2352 return [f for f in l if f.startswith(relroot)]
2353 2353 modified = filterrel(modified)
2354 2354 added = filterrel(added)
2355 2355 removed = filterrel(removed)
2356 2356 relfiltered = True
2357 2357 # filter out copies where either side isn't inside the relative root
2358 2358 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2359 2359 if dst.startswith(relroot)
2360 2360 and src.startswith(relroot)))
2361 2361
2362 2362 modifiedset = set(modified)
2363 2363 addedset = set(added)
2364 2364 removedset = set(removed)
2365 2365 for f in modified:
2366 2366 if f not in ctx1:
2367 2367 # Fix up added, since merged-in additions appear as
2368 2368 # modifications during merges
2369 2369 modifiedset.remove(f)
2370 2370 addedset.add(f)
2371 2371 for f in removed:
2372 2372 if f not in ctx1:
2373 2373 # Merged-in additions that are then removed are reported as removed.
2374 2374 # They are not in ctx1, so we don't want to show them in the diff.
2375 2375 removedset.remove(f)
2376 2376 modified = sorted(modifiedset)
2377 2377 added = sorted(addedset)
2378 2378 removed = sorted(removedset)
2379 2379 for dst, src in list(copy.items()):
2380 2380 if src not in ctx1:
2381 2381 # Files merged in during a merge and then copied/renamed are
2382 2382 # reported as copies. We want to show them in the diff as additions.
2383 2383 del copy[dst]
2384 2384
2385 2385 prefetchmatch = scmutil.matchfiles(
2386 2386 repo, list(modifiedset | addedset | removedset))
2387 2387 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2388 2388
2389 2389 def difffn(opts, losedata):
2390 2390 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2391 2391 copy, getfilectx, opts, losedata, prefix, relroot)
2392 2392 if opts.upgrade and not opts.git:
2393 2393 try:
2394 2394 def losedata(fn):
2395 2395 if not losedatafn or not losedatafn(fn=fn):
2396 2396 raise GitDiffRequired
2397 2397 # Buffer the whole output until we are sure it can be generated
2398 2398 return list(difffn(opts.copy(git=False), losedata))
2399 2399 except GitDiffRequired:
2400 2400 return difffn(opts.copy(git=True), None)
2401 2401 else:
2402 2402 return difffn(opts, None)
2403 2403
2404 2404 def diffsinglehunk(hunklines):
2405 2405 """yield tokens for a list of lines in a single hunk"""
2406 2406 for line in hunklines:
2407 2407 # chomp
2408 2408 chompline = line.rstrip('\n')
2409 2409 # highlight tabs and trailing whitespace
2410 2410 stripline = chompline.rstrip()
2411 2411 if line.startswith('-'):
2412 2412 label = 'diff.deleted'
2413 2413 elif line.startswith('+'):
2414 2414 label = 'diff.inserted'
2415 2415 else:
2416 2416 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2417 2417 for token in tabsplitter.findall(stripline):
2418 2418 if token.startswith('\t'):
2419 2419 yield (token, 'diff.tab')
2420 2420 else:
2421 2421 yield (token, label)
2422 2422
2423 2423 if chompline != stripline:
2424 2424 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2425 2425 if chompline != line:
2426 2426 yield (line[len(chompline):], '')
2427 2427
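# Usage sketch (hypothetical demo name) for diffsinglehunk() above:
# each hunk line becomes labelled tokens, with trailing whitespace
# (and tabs) highlighted separately from the deleted/inserted text.
def _demo_hunktokens():
    tokens = list(diffsinglehunk(['-old\n', '+new  \n']))
    # roughly: ('-old', 'diff.deleted'), ('\n', ''),
    # ('+new', 'diff.inserted'), ('  ', 'diff.trailingwhitespace'),
    # ('\n', '')
    return tokens
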
2428 2428 def diffsinglehunkinline(hunklines):
2429 2429 """yield tokens for a list of lines in a single hunk, with inline colors"""
2430 2430 # prepare deleted, and inserted content
2431 2431 a = ''
2432 2432 b = ''
2433 2433 for line in hunklines:
2434 2434 if line[0] == '-':
2435 2435 a += line[1:]
2436 2436 elif line[0] == '+':
2437 2437 b += line[1:]
2438 2438 else:
2439 2439 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2440 2440 # fast path: if either side is empty, use diffsinglehunk
2441 2441 if not a or not b:
2442 2442 for t in diffsinglehunk(hunklines):
2443 2443 yield t
2444 2444 return
2445 2445 # re-split the content into words
2446 2446 al = wordsplitter.findall(a)
2447 2447 bl = wordsplitter.findall(b)
2448 2448 # re-arrange the words to lines since the diff algorithm is line-based
2449 2449 aln = [s if s == '\n' else s + '\n' for s in al]
2450 2450 bln = [s if s == '\n' else s + '\n' for s in bl]
2451 2451 an = ''.join(aln)
2452 2452 bn = ''.join(bln)
2453 2453 # run the diff algorithm, prepare atokens and btokens
2454 2454 atokens = []
2455 2455 btokens = []
2456 2456 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2457 2457 for (a1, a2, b1, b2), btype in blocks:
2458 2458 changed = btype == '!'
2459 2459 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2460 2460 atokens.append((changed, token))
2461 2461 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2462 2462 btokens.append((changed, token))
2463 2463
2464 2464 # yield deleted tokens, then inserted ones
2465 2465 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2466 2466 ('+', 'diff.inserted', btokens)]:
2467 2467 nextisnewline = True
2468 2468 for changed, token in tokens:
2469 2469 if nextisnewline:
2470 2470 yield (prefix, label)
2471 2471 nextisnewline = False
2472 2472 # special handling for line end
2473 2473 isendofline = token.endswith('\n')
2474 2474 if isendofline:
2475 2475 chomp = token[:-1] # chomp
2476 2476 token = chomp.rstrip() # detect spaces at the end
2477 2477 endspaces = chomp[len(token):]
2478 2478 # scan tabs
2479 2479 for maybetab in tabsplitter.findall(token):
2480 2480 if '\t' == maybetab[0]:
2481 2481 currentlabel = 'diff.tab'
2482 2482 else:
2483 2483 if changed:
2484 2484 currentlabel = label + '.changed'
2485 2485 else:
2486 2486 currentlabel = label + '.unchanged'
2487 2487 yield (maybetab, currentlabel)
2488 2488 if isendofline:
2489 2489 if endspaces:
2490 2490 yield (endspaces, 'diff.trailingwhitespace')
2491 2491 yield ('\n', '')
2492 2492 nextisnewline = True
2493 2493
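The word-level pairing is easier to see in isolation. A hedged sketch that substitutes difflib for mdiff.allblocks and assumes a simplified word pattern (the real wordsplitter lives at module level, outside this excerpt):

import difflib
import re

wordsplitter = re.compile(r'(\t+| +|\w+|\W)')  # simplified, an assumption

a = wordsplitter.findall('the quick fox\n')
b = wordsplitter.findall('the slow fox\n')
for tag, a1, a2, b1, b2 in difflib.SequenceMatcher(None, a, b).get_opcodes():
    changed = tag != 'equal'  # plays the role of btype == '!' above
    print(changed, repr(''.join(a[a1:a2])), '->', repr(''.join(b[b1:b2])))
# False 'the ' -> 'the '
# True 'quick' -> 'slow'
# False ' fox\n' -> ' fox\n'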
2494 2494 def difflabel(func, *args, **kw):
2495 2495 '''yields 2-tuples of (output, label) based on the output of func()'''
2496 2496 if kw.get(r'opts') and kw[r'opts'].worddiff:
2497 2497 dodiffhunk = diffsinglehunkinline
2498 2498 else:
2499 2499 dodiffhunk = diffsinglehunk
2500 2500 headprefixes = [('diff', 'diff.diffline'),
2501 2501 ('copy', 'diff.extended'),
2502 2502 ('rename', 'diff.extended'),
2503 2503 ('old', 'diff.extended'),
2504 2504 ('new', 'diff.extended'),
2505 2505 ('deleted', 'diff.extended'),
2506 2506 ('index', 'diff.extended'),
2507 2507 ('similarity', 'diff.extended'),
2508 2508 ('---', 'diff.file_a'),
2509 2509 ('+++', 'diff.file_b')]
2510 2510 textprefixes = [('@', 'diff.hunk'),
2511 2511 # - and + are handled by diffsinglehunk
2512 2512 ]
2513 2513 head = False
2514 2514
2515 2515 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2516 2516 hunkbuffer = []
2517 2517 def consumehunkbuffer():
2518 2518 if hunkbuffer:
2519 2519 for token in dodiffhunk(hunkbuffer):
2520 2520 yield token
2521 2521 hunkbuffer[:] = []
2522 2522
2523 2523 for chunk in func(*args, **kw):
2524 2524 lines = chunk.split('\n')
2525 2525 linecount = len(lines)
2526 2526 for i, line in enumerate(lines):
2527 2527 if head:
2528 2528 if line.startswith('@'):
2529 2529 head = False
2530 2530 else:
2531 2531 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2532 2532 head = True
2533 2533 diffline = False
2534 2534 if not head and line and line.startswith(('+', '-')):
2535 2535 diffline = True
2536 2536
2537 2537 prefixes = textprefixes
2538 2538 if head:
2539 2539 prefixes = headprefixes
2540 2540 if diffline:
2541 2541 # buffered
2542 2542 bufferedline = line
2543 2543 if i + 1 < linecount:
2544 2544 bufferedline += "\n"
2545 2545 hunkbuffer.append(bufferedline)
2546 2546 else:
2547 2547 # unbuffered
2548 2548 for token in consumehunkbuffer():
2549 2549 yield token
2550 2550 stripline = line.rstrip()
2551 2551 for prefix, label in prefixes:
2552 2552 if stripline.startswith(prefix):
2553 2553 yield (stripline, label)
2554 2554 if line != stripline:
2555 2555 yield (line[len(stripline):],
2556 2556 'diff.trailingwhitespace')
2557 2557 break
2558 2558 else:
2559 2559 yield (line, '')
2560 2560 if i + 1 < linecount:
2561 2561 yield ('\n', '')
2562 2562 for token in consumehunkbuffer():
2563 2563 yield token
2564 2564
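A hedged usage sketch: difflabel() wraps any chunk generator, so a hypothetical fakediff generator is enough to see the labels it assigns:

def fakediff():  # hypothetical stand-in for diff()
    yield ('diff -r 000000000000 -r 111111111111 a.txt\n'
           '--- a/a.txt\n'
           '+++ b/a.txt\n'
           '@@ -1,1 +1,1 @@\n'
           '-old\n'
           '+new\n')

for text, label in difflabel(fakediff):
    print(repr(text), '->', label or '(no label)')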
2565 2565 def diffui(*args, **kw):
2566 2566 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2567 2567 return difflabel(diff, *args, **kw)
2568 2568
2569 2569 def _filepairs(modified, added, removed, copy, opts):
2570 2570 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2571 2571 before and f2 is the name after. For added files, f1 will be None,
2572 2572 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2573 2573 or 'rename' (the latter two only if opts.git is set).'''
2574 2574 gone = set()
2575 2575
2576 2576 copyto = dict([(v, k) for k, v in copy.items()])
2577 2577
2578 2578 addedset, removedset = set(added), set(removed)
2579 2579
2580 2580 for f in sorted(modified + added + removed):
2581 2581 copyop = None
2582 2582 f1, f2 = f, f
2583 2583 if f in addedset:
2584 2584 f1 = None
2585 2585 if f in copy:
2586 2586 if opts.git:
2587 2587 f1 = copy[f]
2588 2588 if f1 in removedset and f1 not in gone:
2589 2589 copyop = 'rename'
2590 2590 gone.add(f1)
2591 2591 else:
2592 2592 copyop = 'copy'
2593 2593 elif f in removedset:
2594 2594 f2 = None
2595 2595 if opts.git:
2596 2596 # have we already reported a copy above?
2597 2597 if (f in copyto and copyto[f] in addedset
2598 2598 and copy[copyto[f]] == f):
2599 2599 continue
2600 2600 yield f1, f2, copyop
2601 2601
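To see the rename folding in action, here is a minimal sketch; fakeopts is a hypothetical stand-in exposing only the .git attribute consulted above:

class fakeopts(object):
    git = True  # hypothetical stand-in for a diffopts object

copy = {'b.txt': 'a.txt'}  # b.txt originates from a.txt
pairs = list(_filepairs([], ['b.txt'], ['a.txt'], copy, fakeopts))
print(pairs)
# [('a.txt', 'b.txt', 'rename')] -- the removal of a.txt is folded into
# a single rename entry instead of being reported twice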
2602 2602 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2603 2603 copy, getfilectx, opts, losedatafn, prefix, relroot):
2604 2604 '''given input data, generate a diff and yield it in blocks
2605 2605
2606 2606 If generating a diff would lose data like flags or binary data and
2607 2607 losedatafn is not None, it will be called.
2608 2608
2609 2609 relroot is removed and prefix is added to every path in the diff output.
2610 2610
2611 2611 If relroot is not empty, this function expects every path in modified,
2612 2612 added, removed and copy to start with it.'''
2613 2613
2614 2614 def gitindex(text):
2615 2615 if not text:
2616 2616 text = ""
2617 2617 l = len(text)
2618 2618 s = hashlib.sha1('blob %d\0' % l)
2619 2619 s.update(text)
2620 2620 return hex(s.digest())
2621 2621
2622 2622 if opts.noprefix:
2623 2623 aprefix = bprefix = ''
2624 2624 else:
2625 2625 aprefix = 'a/'
2626 2626 bprefix = 'b/'
2627 2627
2628 2628 def diffline(f, revs):
2629 2629 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2630 2630 return 'diff %s %s' % (revinfo, f)
2631 2631
2632 2632 def isempty(fctx):
2633 2633 return fctx is None or fctx.size() == 0
2634 2634
2635 2635 date1 = dateutil.datestr(ctx1.date())
2636 2636 date2 = dateutil.datestr(ctx2.date())
2637 2637
2638 2638 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2639 2639
2640 2640 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2641 2641 or repo.ui.configbool('devel', 'check-relroot')):
2642 2642 for f in modified + added + removed + list(copy) + list(copy.values()):
2643 2643 if f is not None and not f.startswith(relroot):
2644 2644 raise AssertionError(
2645 2645 "file %s doesn't start with relroot %s" % (f, relroot))
2646 2646
2647 2647 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2648 2648 content1 = None
2649 2649 content2 = None
2650 2650 fctx1 = None
2651 2651 fctx2 = None
2652 2652 flag1 = None
2653 2653 flag2 = None
2654 2654 if f1:
2655 2655 fctx1 = getfilectx(f1, ctx1)
2656 2656 if opts.git or losedatafn:
2657 2657 flag1 = ctx1.flags(f1)
2658 2658 if f2:
2659 2659 fctx2 = getfilectx(f2, ctx2)
2660 2660 if opts.git or losedatafn:
2661 2661 flag2 = ctx2.flags(f2)
2662 2662 # if binary is True, output "summary" or "base85", but not "text diff"
2663 2663 if opts.text:
2664 2664 binary = False
2665 2665 else:
2666 2666 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2667 2667
2668 2668 if losedatafn and not opts.git:
2669 2669 if (binary or
2670 2670 # copy/rename
2671 2671 f2 in copy or
2672 2672 # empty file creation
2673 2673 (not f1 and isempty(fctx2)) or
2674 2674 # empty file deletion
2675 2675 (isempty(fctx1) and not f2) or
2676 2676 # create with flags
2677 2677 (not f1 and flag2) or
2678 2678 # change flags
2679 2679 (f1 and f2 and flag1 != flag2)):
2680 2680 losedatafn(f2 or f1)
2681 2681
2682 2682 path1 = f1 or f2
2683 2683 path2 = f2 or f1
2684 2684 path1 = posixpath.join(prefix, path1[len(relroot):])
2685 2685 path2 = posixpath.join(prefix, path2[len(relroot):])
2686 2686 header = []
2687 2687 if opts.git:
2688 2688 header.append('diff --git %s%s %s%s' %
2689 2689 (aprefix, path1, bprefix, path2))
2690 2690 if not f1: # added
2691 2691 header.append('new file mode %s' % gitmode[flag2])
2692 2692 elif not f2: # removed
2693 2693 header.append('deleted file mode %s' % gitmode[flag1])
2694 2694 else: # modified/copied/renamed
2695 2695 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2696 2696 if mode1 != mode2:
2697 2697 header.append('old mode %s' % mode1)
2698 2698 header.append('new mode %s' % mode2)
2699 2699 if copyop is not None:
2700 2700 if opts.showsimilarity:
2701 2701 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2702 2702 header.append('similarity index %d%%' % sim)
2703 2703 header.append('%s from %s' % (copyop, path1))
2704 2704 header.append('%s to %s' % (copyop, path2))
2705 2705 elif revs and not repo.ui.quiet:
2706 2706 header.append(diffline(path1, revs))
2707 2707
2708 2708 # fctx.is | diffopts | what to | is fctx.data()
2709 2709 # binary() | text nobinary git index | output? | outputted?
2710 2710 # ------------------------------------|----------------------------
2711 2711 # yes | no no no * | summary | no
2712 2712 # yes | no no yes * | base85 | yes
2713 2713 # yes | no yes no * | summary | no
2714 2714 # yes | no yes yes 0 | summary | no
2715 2715 # yes | no yes yes >0 | summary | semi [1]
2716 2716 # yes | yes * * * | text diff | yes
2717 2717 # no | * * * * | text diff | yes
2718 2718 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2719 2719 if binary and (not opts.git or (opts.git and opts.nobinary and not
2720 2720 opts.index)):
2721 2721 # fast path: no binary content will be displayed, content1 and
2722 2722 # content2 are only used for equivalent test. cmp() could have a
2723 2723 # fast path.
2724 2724 if fctx1 is not None:
2725 2725 content1 = b'\0'
2726 2726 if fctx2 is not None:
2727 2727 if fctx1 is not None and not fctx1.cmp(fctx2):
2728 2728 content2 = b'\0' # not different
2729 2729 else:
2730 2730 content2 = b'\0\0'
2731 2731 else:
2732 2732 # normal path: load contents
2733 2733 if fctx1 is not None:
2734 2734 content1 = fctx1.data()
2735 2735 if fctx2 is not None:
2736 2736 content2 = fctx2.data()
2737 2737
2738 2738 if binary and opts.git and not opts.nobinary:
2739 2739 text = mdiff.b85diff(content1, content2)
2740 2740 if text:
2741 2741 header.append('index %s..%s' %
2742 2742 (gitindex(content1), gitindex(content2)))
2743 2743 hunks = (None, [text]),
2744 2744 else:
2745 2745 if opts.git and opts.index > 0:
2746 2746 flag = flag1
2747 2747 if flag is None:
2748 2748 flag = flag2
2749 2749 header.append('index %s..%s %s' %
2750 2750 (gitindex(content1)[0:opts.index],
2751 2751 gitindex(content2)[0:opts.index],
2752 2752 gitmode[flag]))
2753 2753
2754 2754 uheaders, hunks = mdiff.unidiff(content1, date1,
2755 2755 content2, date2,
2756 2756 path1, path2,
2757 2757 binary=binary, opts=opts)
2758 2758 header.extend(uheaders)
2759 2759 yield fctx1, fctx2, header, hunks
2760 2760
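The gitindex() helper defined inside trydiff above mirrors git's blob hashing, sha1('blob %d\0' % len(data) + data). A quick standalone check against the well-known hash of the empty blob:

import hashlib

def gitblobhex(data):
    s = hashlib.sha1(b'blob %d\0' % len(data))
    s.update(data)
    return s.hexdigest()

assert gitblobhex(b'') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'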
2761 2761 def diffstatsum(stats):
2762 2762 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2763 2763 for f, a, r, b in stats:
2764 2764 maxfile = max(maxfile, encoding.colwidth(f))
2765 2765 maxtotal = max(maxtotal, a + r)
2766 2766 addtotal += a
2767 2767 removetotal += r
2768 2768 binary = binary or b
2769 2769
2770 2770 return maxfile, maxtotal, addtotal, removetotal, binary
2771 2771
2772 2772 def diffstatdata(lines):
2773 2773 diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')
2774 2774
2775 2775 results = []
2776 2776 filename, adds, removes, isbinary = None, 0, 0, False
2777 2777
2778 2778 def addresult():
2779 2779 if filename:
2780 2780 results.append((filename, adds, removes, isbinary))
2781 2781
2782 2782 # inheader is used to track if a line is in the
2783 2783 # header portion of the diff. This helps properly account
2784 2784 # for lines that start with '--' or '++'
2785 2785 inheader = False
2786 2786
2787 2787 for line in lines:
2788 2788 if line.startswith('diff'):
2789 2789 addresult()
2790 2790 # starting a new file diff
2791 2791 # set numbers to 0 and reset inheader
2792 2792 inheader = True
2793 2793 adds, removes, isbinary = 0, 0, False
2794 2794 if line.startswith('diff --git a/'):
2795 2795 filename = gitre.search(line).group(2)
2796 2796 elif line.startswith('diff -r'):
2797 2797 # format: "diff -r ... -r ... filename"
2798 2798 filename = diffre.search(line).group(1)
2799 2799 elif line.startswith('@@'):
2800 2800 inheader = False
2801 2801 elif line.startswith('+') and not inheader:
2802 2802 adds += 1
2803 2803 elif line.startswith('-') and not inheader:
2804 2804 removes += 1
2805 2805 elif (line.startswith('GIT binary patch') or
2806 2806 line.startswith('Binary file')):
2807 2807 isbinary = True
2808 2808 addresult()
2809 2809 return results
2810 2810
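A hedged usage sketch of diffstatdata() on a tiny git-style diff; the header lines are excluded from the counts thanks to the inheader tracking above:

sample = ['diff --git a/hello.py b/hello.py\n',
          '--- a/hello.py\n',
          '+++ b/hello.py\n',
          '@@ -1,1 +1,2 @@\n',
          "-print 'hi'\n",
          "+print 'hello'\n",
          "+print 'world'\n"]
print(diffstatdata(sample))  # [('hello.py', 2, 1, False)]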
2811 2811 def diffstat(lines, width=80):
2812 2812 output = []
2813 2813 stats = diffstatdata(lines)
2814 2814 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2815 2815
2816 2816 countwidth = len(str(maxtotal))
2817 2817 if hasbinary and countwidth < 3:
2818 2818 countwidth = 3
2819 2819 graphwidth = width - countwidth - maxname - 6
2820 2820 if graphwidth < 10:
2821 2821 graphwidth = 10
2822 2822
2823 2823 def scale(i):
2824 2824 if maxtotal <= graphwidth:
2825 2825 return i
2826 2826 # If diffstat runs out of room it doesn't print anything,
2827 2827 # which isn't very useful, so always print at least one + or -
2828 2828 # if there were at least some changes.
2829 2829 return max(i * graphwidth // maxtotal, int(bool(i)))
2830 2830
2831 2831 for filename, adds, removes, isbinary in stats:
2832 2832 if isbinary:
2833 2833 count = 'Bin'
2834 2834 else:
2835 2835 count = '%d' % (adds + removes)
2836 2836 pluses = '+' * scale(adds)
2837 2837 minuses = '-' * scale(removes)
2838 2838 output.append(' %s%s | %*s %s%s\n' %
2839 2839 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2840 2840 countwidth, count, pluses, minuses))
2841 2841
2842 2842 if stats:
2843 2843 output.append(_(' %d files changed, %d insertions(+), '
2844 2844 '%d deletions(-)\n')
2845 2845 % (len(stats), totaladds, totalremoves))
2846 2846
2847 2847 return ''.join(output)
2848 2848
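The scale() arithmetic above guarantees a visible marker for any nonzero count once the graph has to be compressed; a worked example with assumed widths:

maxtotal, graphwidth = 200, 50
for i in (0, 3, 100, 200):
    print(i, max(i * graphwidth // maxtotal, int(bool(i))))
# 0 0
# 3 1    (150 // 200 would be 0, so int(bool(3)) keeps one '+')
# 100 25
# 200 50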
2849 2849 def diffstatui(*args, **kw):
2850 2850 '''like diffstat(), but yields 2-tuples of (output, label) for
2851 2851 ui.write()
2852 2852 '''
2853 2853
2854 2854 for line in diffstat(*args, **kw).splitlines():
2855 2855 if line and line[-1] in '+-':
2856 2856 name, graph = line.rsplit(' ', 1)
2857 2857 yield (name + ' ', '')
2858 2858 m = re.search(br'\++', graph)
2859 2859 if m:
2860 2860 yield (m.group(0), 'diffstat.inserted')
2861 2861 m = re.search(br'-+', graph)
2862 2862 if m:
2863 2863 yield (m.group(0), 'diffstat.deleted')
2864 2864 else:
2865 2865 yield (line, '')
2866 2866 yield ('\n', '')
@@ -1,2250 +1,2250 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 diffutil,
16 17 encoding,
17 18 error,
18 19 hbisect,
19 20 match as matchmod,
20 21 node,
21 22 obsolete as obsmod,
22 23 obsutil,
23 24 pathutil,
24 25 phases,
25 26 pycompat,
26 27 registrar,
27 28 repoview,
28 29 revsetlang,
29 30 scmutil,
30 31 smartset,
31 32 stack as stackmod,
32 33 util,
33 34 )
34 35 from .utils import (
35 36 dateutil,
36 diffutil,
37 37 stringutil,
38 38 )
39 39
40 40 # helpers for processing parsed tree
41 41 getsymbol = revsetlang.getsymbol
42 42 getstring = revsetlang.getstring
43 43 getinteger = revsetlang.getinteger
44 44 getboolean = revsetlang.getboolean
45 45 getlist = revsetlang.getlist
46 46 getrange = revsetlang.getrange
47 47 getargs = revsetlang.getargs
48 48 getargsdict = revsetlang.getargsdict
49 49
50 50 baseset = smartset.baseset
51 51 generatorset = smartset.generatorset
52 52 spanset = smartset.spanset
53 53 fullreposet = smartset.fullreposet
54 54
55 55 # Constants for ordering requirement, used in getset():
56 56 #
57 57 # If 'define', any nested functions and operations MAY change the ordering of
58 58 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
59 59 # it). If 'follow', any nested functions and operations MUST take the ordering
60 60 # specified by the first operand to the '&' operator.
61 61 #
62 62 # For instance,
63 63 #
64 64 # X & (Y | Z)
65 65 # ^ ^^^^^^^
66 66 # | follow
67 67 # define
68 68 #
69 69 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
70 70 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
71 71 #
72 72 # 'any' means the order doesn't matter. For instance,
73 73 #
74 74 # (X & !Y) | ancestors(Z)
75 75 # ^ ^
76 76 # any any
77 77 #
78 78 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
79 79 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
80 80 # since 'ancestors' does not care about the order of its argument.
81 81 #
82 82 # Currently, most revsets do not care about the order, so 'define' is
83 83 # equivalent to 'follow' for them, and the resulting order is based on the
84 84 # 'subset' parameter passed down to them:
85 85 #
86 86 # m = revset.match(...)
87 87 # m(repo, subset, order=defineorder)
88 88 # ^^^^^^
89 89 # For most revsets, 'define' means using the order this subset provides
90 90 #
91 91 # There are a few revsets that always redefine the order if 'define' is
92 92 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
93 93 anyorder = 'any' # don't care about the order; could even be random-shuffled
94 94 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
95 95 followorder = 'follow' # MUST follow the current order
96 96
97 97 # helpers
98 98
99 99 def getset(repo, subset, x, order=defineorder):
100 100 if not x:
101 101 raise error.ParseError(_("missing argument"))
102 102 return methods[x[0]](repo, subset, *x[1:], order=order)
103 103
104 104 def _getrevsource(repo, r):
105 105 extra = repo[r].extra()
106 106 for label in ('source', 'transplant_source', 'rebase_source'):
107 107 if label in extra:
108 108 try:
109 109 return repo[extra[label]].rev()
110 110 except error.RepoLookupError:
111 111 pass
112 112 return None
113 113
114 114 def _sortedb(xs):
115 115 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
116 116
117 117 # operator methods
118 118
119 119 def stringset(repo, subset, x, order):
120 120 if not x:
121 121 raise error.ParseError(_("empty string is not a valid revision"))
122 122 x = scmutil.intrev(scmutil.revsymbol(repo, x))
123 123 if (x in subset
124 124 or x == node.nullrev and isinstance(subset, fullreposet)):
125 125 return baseset([x])
126 126 return baseset()
127 127
128 128 def rangeset(repo, subset, x, y, order):
129 129 m = getset(repo, fullreposet(repo), x)
130 130 n = getset(repo, fullreposet(repo), y)
131 131
132 132 if not m or not n:
133 133 return baseset()
134 134 return _makerangeset(repo, subset, m.first(), n.last(), order)
135 135
136 136 def rangeall(repo, subset, x, order):
137 137 assert x is None
138 138 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
139 139
140 140 def rangepre(repo, subset, y, order):
141 141 # ':y' can't be rewritten to '0:y' since '0' may be hidden
142 142 n = getset(repo, fullreposet(repo), y)
143 143 if not n:
144 144 return baseset()
145 145 return _makerangeset(repo, subset, 0, n.last(), order)
146 146
147 147 def rangepost(repo, subset, x, order):
148 148 m = getset(repo, fullreposet(repo), x)
149 149 if not m:
150 150 return baseset()
151 151 return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
152 152 order)
153 153
154 154 def _makerangeset(repo, subset, m, n, order):
155 155 if m == n:
156 156 r = baseset([m])
157 157 elif n == node.wdirrev:
158 158 r = spanset(repo, m, len(repo)) + baseset([n])
159 159 elif m == node.wdirrev:
160 160 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
161 161 elif m < n:
162 162 r = spanset(repo, m, n + 1)
163 163 else:
164 164 r = spanset(repo, m, n - 1)
165 165
166 166 if order == defineorder:
167 167 return r & subset
168 168 else:
169 169 # carrying the sorting over when possible would be more efficient
170 170 return subset & r
171 171
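A pure-Python analogue of the endpoint handling above (ignoring wdirrev and the subset intersection): a reversed range comes out descending, matching the spanset calls:

def rangerevs(m, n):
    if m == n:
        return [m]
    if m < n:
        return list(range(m, n + 1))    # like spanset(repo, m, n + 1)
    return list(range(m, n - 1, -1))    # like spanset(repo, m, n - 1)

print(rangerevs(2, 5))  # [2, 3, 4, 5]
print(rangerevs(5, 2))  # [5, 4, 3, 2]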
172 172 def dagrange(repo, subset, x, y, order):
173 173 r = fullreposet(repo)
174 174 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
175 175 includepath=True)
176 176 return subset & xs
177 177
178 178 def andset(repo, subset, x, y, order):
179 179 if order == anyorder:
180 180 yorder = anyorder
181 181 else:
182 182 yorder = followorder
183 183 return getset(repo, getset(repo, subset, x, order), y, yorder)
184 184
185 185 def andsmallyset(repo, subset, x, y, order):
186 186 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
187 187 if order == anyorder:
188 188 yorder = anyorder
189 189 else:
190 190 yorder = followorder
191 191 return getset(repo, getset(repo, subset, y, yorder), x, order)
192 192
193 193 def differenceset(repo, subset, x, y, order):
194 194 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
195 195
196 196 def _orsetlist(repo, subset, xs, order):
197 197 assert xs
198 198 if len(xs) == 1:
199 199 return getset(repo, subset, xs[0], order)
200 200 p = len(xs) // 2
201 201 a = _orsetlist(repo, subset, xs[:p], order)
202 202 b = _orsetlist(repo, subset, xs[p:], order)
203 203 return a + b
204 204
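The halving above keeps the combination tree balanced, so recursion depth stays logarithmic in the number of operands; a quick sketch of the resulting split plan:

def splitplan(xs):
    if len(xs) == 1:
        return xs[0]
    p = len(xs) // 2
    return (splitplan(xs[:p]), splitplan(xs[p:]))

print(splitplan(['a', 'b', 'c', 'd', 'e']))
# (('a', 'b'), ('c', ('d', 'e')))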
205 205 def orset(repo, subset, x, order):
206 206 xs = getlist(x)
207 207 if not xs:
208 208 return baseset()
209 209 if order == followorder:
210 210 # slow path to take the subset order
211 211 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
212 212 else:
213 213 return _orsetlist(repo, subset, xs, order)
214 214
215 215 def notset(repo, subset, x, order):
216 216 return subset - getset(repo, subset, x, anyorder)
217 217
218 218 def relationset(repo, subset, x, y, order):
219 219 raise error.ParseError(_("can't use a relation in this context"))
220 220
221 221 def relsubscriptset(repo, subset, x, y, z, order):
222 222 # this is a pretty basic implementation of the 'x#y[z]' operator, still
223 223 # experimental so undocumented. see the wiki for further ideas.
224 224 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
225 225 rel = getsymbol(y)
226 226 n = getinteger(z, _("relation subscript must be an integer"))
227 227
228 228 # TODO: perhaps this should be a table of relation functions
229 229 if rel in ('g', 'generations'):
230 230 # TODO: support range, rewrite tests, and drop startdepth argument
231 231 # from ancestors() and descendants() predicates
232 232 if n <= 0:
233 233 n = -n
234 234 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
235 235 else:
236 236 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
237 237
238 238 raise error.UnknownIdentifier(rel, ['generations'])
239 239
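Hedged usage sketch: with the sign handling above, a negative subscript walks ancestors and a positive one walks descendants:

# hg log -r '.#generations[-2]'  # grandparents of the working parent
# hg log -r '.#generations[2]'   # grandchildren of the working parent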
240 240 def subscriptset(repo, subset, x, y, order):
241 241 raise error.ParseError(_("can't use a subscript in this context"))
242 242
243 243 def listset(repo, subset, *xs, **opts):
244 244 raise error.ParseError(_("can't use a list in this context"),
245 245 hint=_('see hg help "revsets.x or y"'))
246 246
247 247 def keyvaluepair(repo, subset, k, v, order):
248 248 raise error.ParseError(_("can't use a key-value pair in this context"))
249 249
250 250 def func(repo, subset, a, b, order):
251 251 f = getsymbol(a)
252 252 if f in symbols:
253 253 func = symbols[f]
254 254 if getattr(func, '_takeorder', False):
255 255 return func(repo, subset, b, order)
256 256 return func(repo, subset, b)
257 257
258 258 keep = lambda fn: getattr(fn, '__doc__', None) is not None
259 259
260 260 syms = [s for (s, fn) in symbols.items() if keep(fn)]
261 261 raise error.UnknownIdentifier(f, syms)
262 262
263 263 # functions
264 264
265 265 # symbols are callables like:
266 266 # fn(repo, subset, x)
267 267 # with:
268 268 # repo - current repository instance
269 269 # subset - of revisions to be examined
270 270 # x - argument in tree form
271 271 symbols = revsetlang.symbols
272 272
273 273 # symbols which can't be used for a DoS attack for any given input
274 274 # (e.g. those which accept regexes as plain strings shouldn't be included)
275 275 # functions that just return a lot of changesets (like all) don't count here
276 276 safesymbols = set()
277 277
278 278 predicate = registrar.revsetpredicate()
279 279
280 280 @predicate('_destupdate')
281 281 def _destupdate(repo, subset, x):
282 282 # experimental revset for update destination
283 283 args = getargsdict(x, 'limit', 'clean')
284 284 return subset & baseset([destutil.destupdate(repo,
285 285 **pycompat.strkwargs(args))[0]])
286 286
287 287 @predicate('_destmerge')
288 288 def _destmerge(repo, subset, x):
289 289 # experimental revset for merge destination
290 290 sourceset = None
291 291 if x is not None:
292 292 sourceset = getset(repo, fullreposet(repo), x)
293 293 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
294 294
295 295 @predicate('adds(pattern)', safe=True, weight=30)
296 296 def adds(repo, subset, x):
297 297 """Changesets that add a file matching pattern.
298 298
299 299 The pattern without explicit kind like ``glob:`` is expected to be
300 300 relative to the current directory and match against a file or a
301 301 directory.
302 302 """
303 303 # i18n: "adds" is a keyword
304 304 pat = getstring(x, _("adds requires a pattern"))
305 305 return checkstatus(repo, subset, pat, 1)
306 306
307 307 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
308 308 def ancestor(repo, subset, x):
309 309 """A greatest common ancestor of the changesets.
310 310
311 311 Accepts 0 or more changesets.
312 312 Will return an empty set when passed no args.
313 313 Greatest common ancestor of a single changeset is that changeset.
314 314 """
315 315 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
316 316 try:
317 317 anc = repo[next(reviter)]
318 318 except StopIteration:
319 319 return baseset()
320 320 for r in reviter:
321 321 anc = anc.ancestor(repo[r])
322 322
323 323 r = scmutil.intrev(anc)
324 324 if r in subset:
325 325 return baseset([r])
326 326 return baseset()
327 327
328 328 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
329 329 stopdepth=None):
330 330 heads = getset(repo, fullreposet(repo), x)
331 331 if not heads:
332 332 return baseset()
333 333 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
334 334 return subset & s
335 335
336 336 @predicate('ancestors(set[, depth])', safe=True)
337 337 def ancestors(repo, subset, x):
338 338 """Changesets that are ancestors of changesets in set, including the
339 339 given changesets themselves.
340 340
341 341 If depth is specified, the result only includes changesets up to
342 342 the specified generation.
343 343 """
344 344 # startdepth is for internal use only until we can decide the UI
345 345 args = getargsdict(x, 'ancestors', 'set depth startdepth')
346 346 if 'set' not in args:
347 347 # i18n: "ancestors" is a keyword
348 348 raise error.ParseError(_('ancestors takes at least 1 argument'))
349 349 startdepth = stopdepth = None
350 350 if 'startdepth' in args:
351 351 n = getinteger(args['startdepth'],
352 352 "ancestors expects an integer startdepth")
353 353 if n < 0:
354 354 raise error.ParseError("negative startdepth")
355 355 startdepth = n
356 356 if 'depth' in args:
357 357 # i18n: "ancestors" is a keyword
358 358 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
359 359 if n < 0:
360 360 raise error.ParseError(_("negative depth"))
361 361 stopdepth = n + 1
362 362 return _ancestors(repo, subset, args['set'],
363 363 startdepth=startdepth, stopdepth=stopdepth)
364 364
365 365 @predicate('_firstancestors', safe=True)
366 366 def _firstancestors(repo, subset, x):
367 367 # ``_firstancestors(set)``
368 368 # Like ``ancestors(set)`` but follows only the first parents.
369 369 return _ancestors(repo, subset, x, followfirst=True)
370 370
371 371 def _childrenspec(repo, subset, x, n, order):
372 372 """Changesets that are the Nth child of a changeset
373 373 in set.
374 374 """
375 375 cs = set()
376 376 for r in getset(repo, fullreposet(repo), x):
377 377 for i in range(n):
378 378 c = repo[r].children()
379 379 if len(c) == 0:
380 380 break
381 381 if len(c) > 1:
382 382 raise error.RepoLookupError(
383 383 _("revision in set has more than one child"))
384 384 r = c[0].rev()
385 385 else:
386 386 cs.add(r)
387 387 return subset & cs
388 388
389 389 def ancestorspec(repo, subset, x, n, order):
390 390 """``set~n``
391 391 Changesets that are the Nth ancestor (first parents only) of a changeset
392 392 in set.
393 393 """
394 394 n = getinteger(n, _("~ expects a number"))
395 395 if n < 0:
396 396 # children lookup
397 397 return _childrenspec(repo, subset, x, -n, order)
398 398 ps = set()
399 399 cl = repo.changelog
400 400 for r in getset(repo, fullreposet(repo), x):
401 401 for i in range(n):
402 402 try:
403 403 r = cl.parentrevs(r)[0]
404 404 except error.WdirUnsupported:
405 405 r = repo[r].parents()[0].rev()
406 406 ps.add(r)
407 407 return subset & ps
408 408
409 409 @predicate('author(string)', safe=True, weight=10)
410 410 def author(repo, subset, x):
411 411 """Alias for ``user(string)``.
412 412 """
413 413 # i18n: "author" is a keyword
414 414 n = getstring(x, _("author requires a string"))
415 415 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
416 416 return subset.filter(lambda x: matcher(repo[x].user()),
417 417 condrepr=('<user %r>', n))
418 418
419 419 @predicate('bisect(string)', safe=True)
420 420 def bisect(repo, subset, x):
421 421 """Changesets marked in the specified bisect status:
422 422
423 423 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
424 424 - ``goods``, ``bads`` : csets topologically good/bad
425 425 - ``range`` : csets taking part in the bisection
426 426 - ``pruned`` : csets that are goods, bads or skipped
427 427 - ``untested`` : csets whose fate is yet unknown
428 428 - ``ignored`` : csets ignored due to DAG topology
429 429 - ``current`` : the cset currently being bisected
430 430 """
431 431 # i18n: "bisect" is a keyword
432 432 status = getstring(x, _("bisect requires a string")).lower()
433 433 state = set(hbisect.get(repo, status))
434 434 return subset & state
435 435
436 436 # Backward-compatibility
437 437 # - no help entry so that we do not advertise it any more
438 438 @predicate('bisected', safe=True)
439 439 def bisected(repo, subset, x):
440 440 return bisect(repo, subset, x)
441 441
442 442 @predicate('bookmark([name])', safe=True)
443 443 def bookmark(repo, subset, x):
444 444 """The named bookmark or all bookmarks.
445 445
446 446 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
447 447 """
448 448 # i18n: "bookmark" is a keyword
449 449 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
450 450 if args:
451 451 bm = getstring(args[0],
452 452 # i18n: "bookmark" is a keyword
453 453 _('the argument to bookmark must be a string'))
454 454 kind, pattern, matcher = stringutil.stringmatcher(bm)
455 455 bms = set()
456 456 if kind == 'literal':
457 457 bmrev = repo._bookmarks.get(pattern, None)
458 458 if not bmrev:
459 459 raise error.RepoLookupError(_("bookmark '%s' does not exist")
460 460 % pattern)
461 461 bms.add(repo[bmrev].rev())
462 462 else:
463 463 matchrevs = set()
464 464 for name, bmrev in repo._bookmarks.iteritems():
465 465 if matcher(name):
466 466 matchrevs.add(bmrev)
467 467 if not matchrevs:
468 468 raise error.RepoLookupError(_("no bookmarks exist"
469 469 " that match '%s'") % pattern)
470 470 for bmrev in matchrevs:
471 471 bms.add(repo[bmrev].rev())
472 472 else:
473 473 bms = {repo[r].rev() for r in repo._bookmarks.values()}
474 474 bms -= {node.nullrev}
475 475 return subset & bms
476 476
477 477 @predicate('branch(string or set)', safe=True, weight=10)
478 478 def branch(repo, subset, x):
479 479 """
480 480 All changesets belonging to the given branch or the branches of the given
481 481 changesets.
482 482
483 483 Pattern matching is supported for `string`. See
484 484 :hg:`help revisions.patterns`.
485 485 """
486 486 getbi = repo.revbranchcache().branchinfo
487 487 def getbranch(r):
488 488 try:
489 489 return getbi(r)[0]
490 490 except error.WdirUnsupported:
491 491 return repo[r].branch()
492 492
493 493 try:
494 494 b = getstring(x, '')
495 495 except error.ParseError:
496 496 # not a string, but another revspec, e.g. tip()
497 497 pass
498 498 else:
499 499 kind, pattern, matcher = stringutil.stringmatcher(b)
500 500 if kind == 'literal':
501 501 # note: falls through to the revspec case if no branch with
502 502 # this name exists and pattern kind is not specified explicitly
503 503 if pattern in repo.branchmap():
504 504 return subset.filter(lambda r: matcher(getbranch(r)),
505 505 condrepr=('<branch %r>', b))
506 506 if b.startswith('literal:'):
507 507 raise error.RepoLookupError(_("branch '%s' does not exist")
508 508 % pattern)
509 509 else:
510 510 return subset.filter(lambda r: matcher(getbranch(r)),
511 511 condrepr=('<branch %r>', b))
512 512
513 513 s = getset(repo, fullreposet(repo), x)
514 514 b = set()
515 515 for r in s:
516 516 b.add(getbranch(r))
517 517 c = s.__contains__
518 518 return subset.filter(lambda r: c(r) or getbranch(r) in b,
519 519 condrepr=lambda: '<branch %r>' % _sortedb(b))
520 520
521 521 @predicate('phasedivergent()', safe=True)
522 522 def phasedivergent(repo, subset, x):
523 523 """Mutable changesets marked as successors of public changesets.
524 524
525 525 Only non-public and non-obsolete changesets can be `phasedivergent`.
526 526 (EXPERIMENTAL)
527 527 """
528 528 # i18n: "phasedivergent" is a keyword
529 529 getargs(x, 0, 0, _("phasedivergent takes no arguments"))
530 530 phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
531 531 return subset & phasedivergent
532 532
533 533 @predicate('bundle()', safe=True)
534 534 def bundle(repo, subset, x):
535 535 """Changesets in the bundle.
536 536
537 537 Bundle must be specified by the -R option."""
538 538
539 539 try:
540 540 bundlerevs = repo.changelog.bundlerevs
541 541 except AttributeError:
542 542 raise error.Abort(_("no bundle provided - specify with -R"))
543 543 return subset & bundlerevs
544 544
545 545 def checkstatus(repo, subset, pat, field):
546 546 hasset = matchmod.patkind(pat) == 'set'
547 547
548 548 mcache = [None]
549 549 def matches(x):
550 550 c = repo[x]
551 551 if not mcache[0] or hasset:
552 552 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
553 553 m = mcache[0]
554 554 fname = None
555 555 if not m.anypats() and len(m.files()) == 1:
556 556 fname = m.files()[0]
557 557 if fname is not None:
558 558 if fname not in c.files():
559 559 return False
560 560 else:
561 561 for f in c.files():
562 562 if m(f):
563 563 break
564 564 else:
565 565 return False
566 566 files = repo.status(c.p1().node(), c.node())[field]
567 567 if fname is not None:
568 568 if fname in files:
569 569 return True
570 570 else:
571 571 for f in files:
572 572 if m(f):
573 573 return True
574 574
575 575 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
576 576
577 577 def _children(repo, subset, parentset):
578 578 if not parentset:
579 579 return baseset()
580 580 cs = set()
581 581 pr = repo.changelog.parentrevs
582 582 minrev = parentset.min()
583 583 nullrev = node.nullrev
584 584 for r in subset:
585 585 if r <= minrev:
586 586 continue
587 587 p1, p2 = pr(r)
588 588 if p1 in parentset:
589 589 cs.add(r)
590 590 if p2 != nullrev and p2 in parentset:
591 591 cs.add(r)
592 592 return baseset(cs)
593 593
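A pure-Python analogue of the scan above; parentrevs here is a hypothetical stand-in for repo.changelog.parentrevs (rev -> (p1, p2), with -1 for null):

parentrevs = {1: (0, -1), 2: (0, -1), 3: (1, 2)}.__getitem__

def childrenof(parentset, candidates):
    cs = set()
    for r in candidates:
        p1, p2 = parentrevs(r)
        if p1 in parentset or (p2 != -1 and p2 in parentset):
            cs.add(r)
    return cs

print(sorted(childrenof({0}, [1, 2, 3])))  # [1, 2]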
594 594 @predicate('children(set)', safe=True)
595 595 def children(repo, subset, x):
596 596 """Child changesets of changesets in set.
597 597 """
598 598 s = getset(repo, fullreposet(repo), x)
599 599 cs = _children(repo, subset, s)
600 600 return subset & cs
601 601
602 602 @predicate('closed()', safe=True, weight=10)
603 603 def closed(repo, subset, x):
604 604 """Changeset is closed.
605 605 """
606 606 # i18n: "closed" is a keyword
607 607 getargs(x, 0, 0, _("closed takes no arguments"))
608 608 return subset.filter(lambda r: repo[r].closesbranch(),
609 609 condrepr='<branch closed>')
610 610
611 611 @predicate('contains(pattern)', weight=100)
612 612 def contains(repo, subset, x):
613 613 """The revision's manifest contains a file matching pattern (but might not
614 614 modify it). See :hg:`help patterns` for information about file patterns.
615 615
616 616 The pattern without explicit kind like ``glob:`` is expected to be
617 617 relative to the current directory and match against a file exactly
618 618 for efficiency.
619 619 """
620 620 # i18n: "contains" is a keyword
621 621 pat = getstring(x, _("contains requires a pattern"))
622 622
623 623 def matches(x):
624 624 if not matchmod.patkind(pat):
625 625 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
626 626 if pats in repo[x]:
627 627 return True
628 628 else:
629 629 c = repo[x]
630 630 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
631 631 for f in c.manifest():
632 632 if m(f):
633 633 return True
634 634 return False
635 635
636 636 return subset.filter(matches, condrepr=('<contains %r>', pat))
637 637
638 638 @predicate('converted([id])', safe=True)
639 639 def converted(repo, subset, x):
640 640 """Changesets converted from the given identifier in the old repository if
641 641 present, or all converted changesets if no identifier is specified.
642 642 """
643 643
644 644 # There is exactly no chance of resolving the revision, so do a simple
645 645 # string compare and hope for the best
646 646
647 647 rev = None
648 648 # i18n: "converted" is a keyword
649 649 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
650 650 if l:
651 651 # i18n: "converted" is a keyword
652 652 rev = getstring(l[0], _('converted requires a revision'))
653 653
654 654 def _matchvalue(r):
655 655 source = repo[r].extra().get('convert_revision', None)
656 656 return source is not None and (rev is None or source.startswith(rev))
657 657
658 658 return subset.filter(lambda r: _matchvalue(r),
659 659 condrepr=('<converted %r>', rev))
660 660
661 661 @predicate('date(interval)', safe=True, weight=10)
662 662 def date(repo, subset, x):
663 663 """Changesets within the interval, see :hg:`help dates`.
664 664 """
665 665 # i18n: "date" is a keyword
666 666 ds = getstring(x, _("date requires a string"))
667 667 dm = dateutil.matchdate(ds)
668 668 return subset.filter(lambda x: dm(repo[x].date()[0]),
669 669 condrepr=('<date %r>', ds))
670 670
671 671 @predicate('desc(string)', safe=True, weight=10)
672 672 def desc(repo, subset, x):
673 673 """Search commit message for string. The match is case-insensitive.
674 674
675 675 Pattern matching is supported for `string`. See
676 676 :hg:`help revisions.patterns`.
677 677 """
678 678 # i18n: "desc" is a keyword
679 679 ds = getstring(x, _("desc requires a string"))
680 680
681 681 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
682 682
683 683 return subset.filter(lambda r: matcher(repo[r].description()),
684 684 condrepr=('<desc %r>', ds))
685 685
686 686 def _descendants(repo, subset, x, followfirst=False, startdepth=None,
687 687 stopdepth=None):
688 688 roots = getset(repo, fullreposet(repo), x)
689 689 if not roots:
690 690 return baseset()
691 691 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
692 692 return subset & s
693 693
694 694 @predicate('descendants(set[, depth])', safe=True)
695 695 def descendants(repo, subset, x):
696 696 """Changesets which are descendants of changesets in set, including the
697 697 given changesets themselves.
698 698
699 699 If depth is specified, the result only includes changesets up to
700 700 the specified generation.
701 701 """
702 702 # startdepth is for internal use only until we can decide the UI
703 703 args = getargsdict(x, 'descendants', 'set depth startdepth')
704 704 if 'set' not in args:
705 705 # i18n: "descendants" is a keyword
706 706 raise error.ParseError(_('descendants takes at least 1 argument'))
707 707 startdepth = stopdepth = None
708 708 if 'startdepth' in args:
709 709 n = getinteger(args['startdepth'],
710 710 "descendants expects an integer startdepth")
711 711 if n < 0:
712 712 raise error.ParseError("negative startdepth")
713 713 startdepth = n
714 714 if 'depth' in args:
715 715 # i18n: "descendants" is a keyword
716 716 n = getinteger(args['depth'], _("descendants expects an integer depth"))
717 717 if n < 0:
718 718 raise error.ParseError(_("negative depth"))
719 719 stopdepth = n + 1
720 720 return _descendants(repo, subset, args['set'],
721 721 startdepth=startdepth, stopdepth=stopdepth)
722 722
723 723 @predicate('_firstdescendants', safe=True)
724 724 def _firstdescendants(repo, subset, x):
725 725 # ``_firstdescendants(set)``
726 726 # Like ``descendants(set)`` but follows only the first parents.
727 727 return _descendants(repo, subset, x, followfirst=True)
728 728
729 729 @predicate('destination([set])', safe=True, weight=10)
730 730 def destination(repo, subset, x):
731 731 """Changesets that were created by a graft, transplant or rebase operation,
732 732 with the given revisions specified as the source. Omitting the optional set
733 733 is the same as passing all().
734 734 """
735 735 if x is not None:
736 736 sources = getset(repo, fullreposet(repo), x)
737 737 else:
738 738 sources = fullreposet(repo)
739 739
740 740 dests = set()
741 741
742 742 # subset contains all of the possible destinations that can be returned, so
743 743 # iterate over them and see if their source(s) were provided in the arg set.
744 744 # Even if the immediate src of r is not in the arg set, src's source (or
745 745 # further back) may be. Scanning back further than the immediate src allows
746 746 # transitive transplants and rebases to yield the same results as transitive
747 747 # grafts.
748 748 for r in subset:
749 749 src = _getrevsource(repo, r)
750 750 lineage = None
751 751
752 752 while src is not None:
753 753 if lineage is None:
754 754 lineage = list()
755 755
756 756 lineage.append(r)
757 757
758 758 # The visited lineage is a match if the current source is in the arg
759 759 # set. Since every candidate dest is visited by way of iterating
760 760 # subset, any dests further back in the lineage will be tested by a
761 761 # different iteration over subset. Likewise, if the src was already
762 762 # selected, the current lineage can be selected without going back
763 763 # further.
764 764 if src in sources or src in dests:
765 765 dests.update(lineage)
766 766 break
767 767
768 768 r = src
769 769 src = _getrevsource(repo, r)
770 770
771 771 return subset.filter(dests.__contains__,
772 772 condrepr=lambda: '<destination %r>' % _sortedb(dests))
773 773
774 774 @predicate('contentdivergent()', safe=True)
775 775 def contentdivergent(repo, subset, x):
776 776 """
777 777 Final successors of changesets with an alternative set of final
778 778 successors. (EXPERIMENTAL)
779 779 """
780 780 # i18n: "contentdivergent" is a keyword
781 781 getargs(x, 0, 0, _("contentdivergent takes no arguments"))
782 782 contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
783 783 return subset & contentdivergent
784 784
785 785 @predicate('extdata(source)', safe=False, weight=100)
786 786 def extdata(repo, subset, x):
787 787 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
788 788 # i18n: "extdata" is a keyword
789 789 args = getargsdict(x, 'extdata', 'source')
790 790 source = getstring(args.get('source'),
791 791 # i18n: "extdata" is a keyword
792 792 _('extdata takes at least 1 string argument'))
793 793 data = scmutil.extdatasource(repo, source)
794 794 return subset & baseset(data)
795 795
796 796 @predicate('extinct()', safe=True)
797 797 def extinct(repo, subset, x):
798 798 """Obsolete changesets with obsolete descendants only.
799 799 """
800 800 # i18n: "extinct" is a keyword
801 801 getargs(x, 0, 0, _("extinct takes no arguments"))
802 802 extincts = obsmod.getrevs(repo, 'extinct')
803 803 return subset & extincts
804 804
805 805 @predicate('extra(label, [value])', safe=True)
806 806 def extra(repo, subset, x):
807 807 """Changesets with the given label in the extra metadata, with the given
808 808 optional value.
809 809
810 810 Pattern matching is supported for `value`. See
811 811 :hg:`help revisions.patterns`.
812 812 """
813 813 args = getargsdict(x, 'extra', 'label value')
814 814 if 'label' not in args:
815 815 # i18n: "extra" is a keyword
816 816 raise error.ParseError(_('extra takes at least 1 argument'))
817 817 # i18n: "extra" is a keyword
818 818 label = getstring(args['label'], _('first argument to extra must be '
819 819 'a string'))
820 820 value = None
821 821
822 822 if 'value' in args:
823 823 # i18n: "extra" is a keyword
824 824 value = getstring(args['value'], _('second argument to extra must be '
825 825 'a string'))
826 826 kind, value, matcher = stringutil.stringmatcher(value)
827 827
828 828 def _matchvalue(r):
829 829 extra = repo[r].extra()
830 830 return label in extra and (value is None or matcher(extra[label]))
831 831
832 832 return subset.filter(lambda r: _matchvalue(r),
833 833 condrepr=('<extra[%r] %r>', label, value))
834 834
835 835 @predicate('filelog(pattern)', safe=True)
836 836 def filelog(repo, subset, x):
837 837 """Changesets connected to the specified filelog.
838 838
839 839 For performance reasons, visits only revisions mentioned in the file-level
840 840 filelog, rather than filtering through all changesets (much faster, but
841 841 doesn't include deletes or duplicate changes). For a slower, more accurate
842 842 result, use ``file()``.
843 843
844 844 The pattern without explicit kind like ``glob:`` is expected to be
845 845 relative to the current directory and match against a file exactly
846 846 for efficiency.
847 847
848 848 If some linkrev points to revisions filtered by the current repoview, we'll
849 849 work around it to return a non-filtered value.
850 850 """
851 851
852 852 # i18n: "filelog" is a keyword
853 853 pat = getstring(x, _("filelog requires a pattern"))
854 854 s = set()
855 855 cl = repo.changelog
856 856
857 857 if not matchmod.patkind(pat):
858 858 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
859 859 files = [f]
860 860 else:
861 861 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
862 862 files = (f for f in repo[None] if m(f))
863 863
864 864 for f in files:
865 865 fl = repo.file(f)
866 866 known = {}
867 867 scanpos = 0
868 868 for fr in list(fl):
869 869 fn = fl.node(fr)
870 870 if fn in known:
871 871 s.add(known[fn])
872 872 continue
873 873
874 874 lr = fl.linkrev(fr)
875 875 if lr in cl:
876 876 s.add(lr)
877 877 elif scanpos is not None:
878 878 # lowest matching changeset is filtered, scan further
879 879 # ahead in changelog
880 880 start = max(lr, scanpos) + 1
881 881 scanpos = None
882 882 for r in cl.revs(start):
883 883 # minimize parsing of non-matching entries
884 884 if f in cl.revision(r) and f in cl.readfiles(r):
885 885 try:
886 886 # try to use manifest delta fastpath
887 887 n = repo[r].filenode(f)
888 888 if n not in known:
889 889 if n == fn:
890 890 s.add(r)
891 891 scanpos = r
892 892 break
893 893 else:
894 894 known[n] = r
895 895 except error.ManifestLookupError:
896 896 # deletion in changelog
897 897 continue
898 898
899 899 return subset & s
900 900
901 901 @predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
902 902 def first(repo, subset, x, order):
903 903 """An alias for limit().
904 904 """
905 905 return limit(repo, subset, x, order)
906 906
907 907 def _follow(repo, subset, x, name, followfirst=False):
908 908 args = getargsdict(x, name, 'file startrev')
909 909 revs = None
910 910 if 'startrev' in args:
911 911 revs = getset(repo, fullreposet(repo), args['startrev'])
912 912 if 'file' in args:
913 913 x = getstring(args['file'], _("%s expected a pattern") % name)
914 914 if revs is None:
915 915 revs = [None]
916 916 fctxs = []
917 917 for r in revs:
918 918 ctx = mctx = repo[r]
919 919 if r is None:
920 920 ctx = repo['.']
921 921 m = matchmod.match(repo.root, repo.getcwd(), [x],
922 922 ctx=mctx, default='path')
923 923 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
924 924 s = dagop.filerevancestors(fctxs, followfirst)
925 925 else:
926 926 if revs is None:
927 927 revs = baseset([repo['.'].rev()])
928 928 s = dagop.revancestors(repo, revs, followfirst)
929 929
930 930 return subset & s
931 931
932 932 @predicate('follow([file[, startrev]])', safe=True)
933 933 def follow(repo, subset, x):
934 934 """
935 935 An alias for ``::.`` (ancestors of the working directory's first parent).
936 936 If file pattern is specified, the histories of files matching given
937 937 pattern in the revision given by startrev are followed, including copies.
938 938 """
939 939 return _follow(repo, subset, x, 'follow')
940 940
941 941 @predicate('_followfirst', safe=True)
942 942 def _followfirst(repo, subset, x):
943 943 # ``followfirst([file[, startrev]])``
944 944 # Like ``follow([file[, startrev]])`` but follows only the first parent
944 944 # of every revision or file revision.
946 946 return _follow(repo, subset, x, '_followfirst', followfirst=True)
947 947
948 948 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
949 949 safe=True)
950 950 def followlines(repo, subset, x):
951 951 """Changesets modifying `file` in line range ('fromline', 'toline').
952 952
953 953 Line range corresponds to 'file' content at 'startrev' and should hence be
954 954 consistent with file size. If startrev is not specified, working directory's
955 955 parent is used.
956 956
957 957 By default, ancestors of 'startrev' are returned. If 'descend' is True,
958 958 descendants of 'startrev' are returned though renames are (currently) not
959 959 followed in this direction.
960 960 """
961 961 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
962 962 if len(args['lines']) != 1:
963 963 raise error.ParseError(_("followlines requires a line range"))
964 964
965 965 rev = '.'
966 966 if 'startrev' in args:
967 967 revs = getset(repo, fullreposet(repo), args['startrev'])
968 968 if len(revs) != 1:
969 969 raise error.ParseError(
970 970 # i18n: "followlines" is a keyword
971 971 _("followlines expects exactly one revision"))
972 972 rev = revs.last()
973 973
974 974 pat = getstring(args['file'], _("followlines requires a pattern"))
975 975 # i18n: "followlines" is a keyword
976 976 msg = _("followlines expects exactly one file")
977 977 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
978 978 # i18n: "followlines" is a keyword
979 979 lr = getrange(args['lines'][0], _("followlines expects a line range"))
980 980 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
981 981 for a in lr]
982 982 fromline, toline = util.processlinerange(fromline, toline)
983 983
984 984 fctx = repo[rev].filectx(fname)
985 985 descend = False
986 986 if 'descend' in args:
987 987 descend = getboolean(args['descend'],
988 988 # i18n: "descend" is a keyword
989 989 _("descend argument must be a boolean"))
990 990 if descend:
991 991 rs = generatorset(
992 992 (c.rev() for c, _linerange
993 993 in dagop.blockdescendants(fctx, fromline, toline)),
994 994 iterasc=True)
995 995 else:
996 996 rs = generatorset(
997 997 (c.rev() for c, _linerange
998 998 in dagop.blockancestors(fctx, fromline, toline)),
999 999 iterasc=False)
1000 1000 return subset & rs
1001 1001
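Hedged usage sketch on the command line, matching the signature documented above:

# hg log -r 'followlines(mercurial/util.py, 10:20)'
# hg log -r 'followlines(mercurial/util.py, 10:20, descend=True)'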
1002 1002 @predicate('all()', safe=True)
1003 1003 def getall(repo, subset, x):
1004 1004 """All changesets, the same as ``0:tip``.
1005 1005 """
1006 1006 # i18n: "all" is a keyword
1007 1007 getargs(x, 0, 0, _("all takes no arguments"))
1008 1008 return subset & spanset(repo) # drop "null" if any
1009 1009
1010 1010 @predicate('grep(regex)', weight=10)
1011 1011 def grep(repo, subset, x):
1012 1012 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1013 1013 to ensure special escape characters are handled correctly. Unlike
1014 1014 ``keyword(string)``, the match is case-sensitive.
1015 1015 """
1016 1016 try:
1017 1017 # i18n: "grep" is a keyword
1018 1018 gr = re.compile(getstring(x, _("grep requires a string")))
1019 1019 except re.error as e:
1020 1020 raise error.ParseError(
1021 1021 _('invalid match pattern: %s') % stringutil.forcebytestr(e))
1022 1022
1023 1023 def matches(x):
1024 1024 c = repo[x]
1025 1025 for e in c.files() + [c.user(), c.description()]:
1026 1026 if gr.search(e):
1027 1027 return True
1028 1028 return False
1029 1029
1030 1030 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1031 1031
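Hedged usage sketch: since the string is compiled as a regular expression, the raw-quoting form recommended in the docstring avoids double escaping:

# hg log -r 'grep(r"fixes issue\d+")'  # case-sensitive, unlike keyword()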
1032 1032 @predicate('_matchfiles', safe=True)
1033 1033 def _matchfiles(repo, subset, x):
1034 1034 # _matchfiles takes a revset list of prefixed arguments:
1035 1035 #
1036 1036 # [p:foo, i:bar, x:baz]
1037 1037 #
1038 1038 # builds a match object from them and filters subset. Allowed
1039 1039 # prefixes are 'p:' for regular patterns, 'i:' for include
1040 1040 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1041 1041 # a revision identifier, or the empty string to reference the
1042 1042 # working directory, from which the match object is
1043 1043 # initialized. Use 'd:' to set the default matching mode, default
1044 1044 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1045 1045
1046 1046 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1047 1047 pats, inc, exc = [], [], []
1048 1048 rev, default = None, None
1049 1049 for arg in l:
1050 1050 s = getstring(arg, "_matchfiles requires string arguments")
1051 1051 prefix, value = s[:2], s[2:]
1052 1052 if prefix == 'p:':
1053 1053 pats.append(value)
1054 1054 elif prefix == 'i:':
1055 1055 inc.append(value)
1056 1056 elif prefix == 'x:':
1057 1057 exc.append(value)
1058 1058 elif prefix == 'r:':
1059 1059 if rev is not None:
1060 1060 raise error.ParseError('_matchfiles expected at most one '
1061 1061 'revision')
1062 1062 if value == '': # empty means working directory
1063 1063 rev = node.wdirrev
1064 1064 else:
1065 1065 rev = value
1066 1066 elif prefix == 'd:':
1067 1067 if default is not None:
1068 1068 raise error.ParseError('_matchfiles expected at most one '
1069 1069 'default mode')
1070 1070 default = value
1071 1071 else:
1072 1072 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1073 1073 if not default:
1074 1074 default = 'glob'
1075 1075 hasset = any(matchmod.patkind(p) == 'set' for p in pats + inc + exc)
1076 1076
1077 1077 mcache = [None]
1078 1078
1079 1079 # This directly reads the changelog data, as creating a changectx for all
1080 1080 # revisions is quite expensive.
1081 1081 getfiles = repo.changelog.readfiles
1082 1082 wdirrev = node.wdirrev
1083 1083 def matches(x):
1084 1084 if x == wdirrev:
1085 1085 files = repo[x].files()
1086 1086 else:
1087 1087 files = getfiles(x)
1088 1088
1089 1089 if not mcache[0] or (hasset and rev is None):
1090 1090 r = x if rev is None else rev
1091 1091 mcache[0] = matchmod.match(repo.root, repo.getcwd(), pats,
1092 1092 include=inc, exclude=exc, ctx=repo[r],
1093 1093 default=default)
1094 1094 m = mcache[0]
1095 1095
1096 1096 for f in files:
1097 1097 if m(f):
1098 1098 return True
1099 1099 return False
1100 1100
1101 1101 return subset.filter(matches,
1102 1102 condrepr=('<matchfiles patterns=%r, include=%r '
1103 1103 'exclude=%r, default=%r, rev=%r>',
1104 1104 pats, inc, exc, default, rev))
1105 1105
1106 1106 @predicate('file(pattern)', safe=True, weight=10)
1107 1107 def hasfile(repo, subset, x):
1108 1108 """Changesets affecting files matched by pattern.
1109 1109
1110 1110 For a faster but less accurate result, consider using ``filelog()``
1111 1111 instead.
1112 1112
1113 1113 This predicate uses ``glob:`` as the default kind of pattern.
1114 1114 """
1115 1115 # i18n: "file" is a keyword
1116 1116 pat = getstring(x, _("file requires a pattern"))
1117 1117 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1118 1118
1119 1119 @predicate('head()', safe=True)
1120 1120 def head(repo, subset, x):
1121 1121 """Changeset is a named branch head.
1122 1122 """
1123 1123 # i18n: "head" is a keyword
1124 1124 getargs(x, 0, 0, _("head takes no arguments"))
1125 1125 hs = set()
1126 1126 cl = repo.changelog
1127 1127 for ls in repo.branchmap().itervalues():
1128 1128 hs.update(cl.rev(h) for h in ls)
1129 1129 return subset & baseset(hs)
1130 1130
1131 1131 @predicate('heads(set)', safe=True, takeorder=True)
1132 1132 def heads(repo, subset, x, order):
1133 1133 """Members of set with no children in set.
1134 1134 """
1135 1135 # argument set should never define order
1136 1136 if order == defineorder:
1137 1137 order = followorder
1138 1138 s = getset(repo, subset, x, order=order)
1139 1139 ps = parents(repo, subset, x)
1140 1140 return s - ps
1141 1141
1142 1142 @predicate('hidden()', safe=True)
1143 1143 def hidden(repo, subset, x):
1144 1144 """Hidden changesets.
1145 1145 """
1146 1146 # i18n: "hidden" is a keyword
1147 1147 getargs(x, 0, 0, _("hidden takes no arguments"))
1148 1148 hiddenrevs = repoview.filterrevs(repo, 'visible')
1149 1149 return subset & hiddenrevs
1150 1150
1151 1151 @predicate('keyword(string)', safe=True, weight=10)
1152 1152 def keyword(repo, subset, x):
1153 1153 """Search commit message, user name, and names of changed files for
1154 1154 string. The match is case-insensitive.
1155 1155
1156 1156 For a regular expression or case sensitive search of these fields, use
1157 1157 ``grep(regex)``.
1158 1158 """
1159 1159 # i18n: "keyword" is a keyword
1160 1160 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1161 1161
1162 1162 def matches(r):
1163 1163 c = repo[r]
1164 1164 return any(kw in encoding.lower(t)
1165 1165 for t in c.files() + [c.user(), c.description()])
1166 1166
1167 1167 return subset.filter(matches, condrepr=('<keyword %r>', kw))
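
# Editor's example (illustrative, not in the original source):
#
#   hg log -r "keyword(bug)"      # case-insensitive substring match
#   hg log -r "grep('bug\d+')"    # case-sensitive regex alternative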
1168 1168
1169 1169 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1170 1170 def limit(repo, subset, x, order):
1171 1171 """First n members of set, defaulting to 1, starting from offset.
1172 1172 """
1173 1173 args = getargsdict(x, 'limit', 'set n offset')
1174 1174 if 'set' not in args:
1175 1175 # i18n: "limit" is a keyword
1176 1176 raise error.ParseError(_("limit requires one to three arguments"))
1177 1177 # i18n: "limit" is a keyword
1178 1178 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1179 1179 if lim < 0:
1180 1180 raise error.ParseError(_("negative number to select"))
1181 1181 # i18n: "limit" is a keyword
1182 1182 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1183 1183 if ofs < 0:
1184 1184 raise error.ParseError(_("negative offset"))
1185 1185 os = getset(repo, fullreposet(repo), args['set'])
1186 1186 ls = os.slice(ofs, ofs + lim)
1187 1187 if order == followorder and lim > 1:
1188 1188 return subset & ls
1189 1189 return ls & subset
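
# Editor's example (illustrative, not in the original source):
#
#   hg log -r "limit(branch(default), 3)"   # first three on default
#   hg log -r "limit(all(), 5, 10)"         # five revisions, starting at offset 10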
1190 1190
1191 1191 @predicate('last(set, [n])', safe=True, takeorder=True)
1192 1192 def last(repo, subset, x, order):
1193 1193 """Last n members of set, defaulting to 1.
1194 1194 """
1195 1195 # i18n: "last" is a keyword
1196 1196 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1197 1197 lim = 1
1198 1198 if len(l) == 2:
1199 1199 # i18n: "last" is a keyword
1200 1200 lim = getinteger(l[1], _("last expects a number"))
1201 1201 if lim < 0:
1202 1202 raise error.ParseError(_("negative number to select"))
1203 1203 os = getset(repo, fullreposet(repo), l[0])
1204 1204 os.reverse()
1205 1205 ls = os.slice(0, lim)
1206 1206 if order == followorder and lim > 1:
1207 1207 return subset & ls
1208 1208 ls.reverse()
1209 1209 return ls & subset
1210 1210
1211 1211 @predicate('max(set)', safe=True)
1212 1212 def maxrev(repo, subset, x):
1213 1213 """Changeset with highest revision number in set.
1214 1214 """
1215 1215 os = getset(repo, fullreposet(repo), x)
1216 1216 try:
1217 1217 m = os.max()
1218 1218 if m in subset:
1219 1219 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1220 1220 except ValueError:
1221 1221 # os.max() throws a ValueError when the collection is empty.
1222 1222 # Same as python's max().
1223 1223 pass
1224 1224 return baseset(datarepr=('<max %r, %r>', subset, os))
1225 1225
1226 1226 @predicate('merge()', safe=True)
1227 1227 def merge(repo, subset, x):
1228 1228 """Changeset is a merge changeset.
1229 1229 """
1230 1230 # i18n: "merge" is a keyword
1231 1231 getargs(x, 0, 0, _("merge takes no arguments"))
1232 1232 cl = repo.changelog
1233 1233 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1234 1234 condrepr='<merge>')
1235 1235
1236 1236 @predicate('branchpoint()', safe=True)
1237 1237 def branchpoint(repo, subset, x):
1238 1238 """Changesets with more than one child.
1239 1239 """
1240 1240 # i18n: "branchpoint" is a keyword
1241 1241 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1242 1242 cl = repo.changelog
1243 1243 if not subset:
1244 1244 return baseset()
1245 1245 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1246 1246 # (and if it is not, it should be)
1247 1247 baserev = min(subset)
1248 1248 parentscount = [0]*(len(repo) - baserev)
1249 1249 for r in cl.revs(start=baserev + 1):
1250 1250 for p in cl.parentrevs(r):
1251 1251 if p >= baserev:
1252 1252 parentscount[p - baserev] += 1
1253 1253 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1254 1254 condrepr='<branchpoint>')
1255 1255
1256 1256 @predicate('min(set)', safe=True)
1257 1257 def minrev(repo, subset, x):
1258 1258 """Changeset with lowest revision number in set.
1259 1259 """
1260 1260 os = getset(repo, fullreposet(repo), x)
1261 1261 try:
1262 1262 m = os.min()
1263 1263 if m in subset:
1264 1264 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1265 1265 except ValueError:
1266 1266 # os.min() throws a ValueError when the collection is empty.
1267 1267 # Same as python's min().
1268 1268 pass
1269 1269 return baseset(datarepr=('<min %r, %r>', subset, os))
1270 1270
1271 1271 @predicate('modifies(pattern)', safe=True, weight=30)
1272 1272 def modifies(repo, subset, x):
1273 1273 """Changesets modifying files matched by pattern.
1274 1274
1275 1275 The pattern without explicit kind like ``glob:`` is expected to be
1276 1276 relative to the current directory and match against a file or a
1277 1277 directory.
1278 1278 """
1279 1279 # i18n: "modifies" is a keyword
1280 1280 pat = getstring(x, _("modifies requires a pattern"))
1281 1281 return checkstatus(repo, subset, pat, 0)
1282 1282
1283 1283 @predicate('named(namespace)')
1284 1284 def named(repo, subset, x):
1285 1285 """The changesets in a given namespace.
1286 1286
1287 1287 Pattern matching is supported for `namespace`. See
1288 1288 :hg:`help revisions.patterns`.
1289 1289 """
1290 1290 # i18n: "named" is a keyword
1291 1291 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1292 1292
1293 1293 ns = getstring(args[0],
1294 1294 # i18n: "named" is a keyword
1295 1295 _('the argument to named must be a string'))
1296 1296 kind, pattern, matcher = stringutil.stringmatcher(ns)
1297 1297 namespaces = set()
1298 1298 if kind == 'literal':
1299 1299 if pattern not in repo.names:
1300 1300 raise error.RepoLookupError(_("namespace '%s' does not exist")
1301 1301 % ns)
1302 1302 namespaces.add(repo.names[pattern])
1303 1303 else:
1304 1304 for name, ns in repo.names.iteritems():
1305 1305 if matcher(name):
1306 1306 namespaces.add(ns)
1307 1307 if not namespaces:
1308 1308 raise error.RepoLookupError(_("no namespace exists"
1309 1309 " that matches '%s'") % pattern)
1310 1310
1311 1311 names = set()
1312 1312 for ns in namespaces:
1313 1313 for name in ns.listnames(repo):
1314 1314 if name not in ns.deprecated:
1315 1315 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1316 1316
1317 1317 names -= {node.nullrev}
1318 1318 return subset & names
1319 1319
1320 1320 @predicate('id(string)', safe=True)
1321 1321 def node_(repo, subset, x):
1322 1322 """Revision non-ambiguously specified by the given hex string prefix.
1323 1323 """
1324 1324 # i18n: "id" is a keyword
1325 1325 l = getargs(x, 1, 1, _("id requires one argument"))
1326 1326 # i18n: "id" is a keyword
1327 1327 n = getstring(l[0], _("id requires a string"))
1328 1328 if len(n) == 40:
1329 1329 try:
1330 1330 rn = repo.changelog.rev(node.bin(n))
1331 1331 except error.WdirUnsupported:
1332 1332 rn = node.wdirrev
1333 1333 except (LookupError, TypeError):
1334 1334 rn = None
1335 1335 else:
1336 1336 rn = None
1337 1337 try:
1338 1338 pm = scmutil.resolvehexnodeidprefix(repo, n)
1339 1339 if pm is not None:
1340 1340 rn = repo.changelog.rev(pm)
1341 1341 except LookupError:
1342 1342 pass
1343 1343 except error.WdirUnsupported:
1344 1344 rn = node.wdirrev
1345 1345
1346 1346 if rn is None:
1347 1347 return baseset()
1348 1348 result = baseset([rn])
1349 1349 return result & subset
1350 1350
1351 1351 @predicate('none()', safe=True)
1352 1352 def none(repo, subset, x):
1353 1353 """No changesets.
1354 1354 """
1355 1355 # i18n: "none" is a keyword
1356 1356 getargs(x, 0, 0, _("none takes no arguments"))
1357 1357 return baseset()
1358 1358
1359 1359 @predicate('obsolete()', safe=True)
1360 1360 def obsolete(repo, subset, x):
1361 1361 """Mutable changeset with a newer version."""
1362 1362 # i18n: "obsolete" is a keyword
1363 1363 getargs(x, 0, 0, _("obsolete takes no arguments"))
1364 1364 obsoletes = obsmod.getrevs(repo, 'obsolete')
1365 1365 return subset & obsoletes
1366 1366
1367 1367 @predicate('only(set, [set])', safe=True)
1368 1368 def only(repo, subset, x):
1369 1369 """Changesets that are ancestors of the first set that are not ancestors
1370 1370 of any other head in the repo. If a second set is specified, the result
1371 1371 is ancestors of the first set that are not ancestors of the second set
1372 1372 (i.e. ::<set1> - ::<set2>).
1373 1373 """
1374 1374 cl = repo.changelog
1375 1375 # i18n: "only" is a keyword
1376 1376 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1377 1377 include = getset(repo, fullreposet(repo), args[0])
1378 1378 if len(args) == 1:
1379 1379 if not include:
1380 1380 return baseset()
1381 1381
1382 1382 descendants = set(dagop.revdescendants(repo, include, False))
1383 1383 exclude = [rev for rev in cl.headrevs()
1384 1384 if rev not in descendants and rev not in include]
1385 1385 else:
1386 1386 exclude = getset(repo, fullreposet(repo), args[1])
1387 1387
1388 1388 results = set(cl.findmissingrevs(common=exclude, heads=include))
1389 1389 # XXX we should turn this into a baseset instead of a set, smartset may do
1390 1390 # some optimizations from the fact this is a baseset.
1391 1391 return subset & results
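
# Editor's example (illustrative, not in the original source):
#
#   hg log -r "only(feature)"           # ::feature minus ancestors of other heads
#   hg log -r "only(feature, default)"  # equivalent to ::feature - ::default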
1392 1392
1393 1393 @predicate('origin([set])', safe=True)
1394 1394 def origin(repo, subset, x):
1395 1395 """
1396 1396 Changesets that were specified as a source for the grafts, transplants or
1397 1397 rebases that created the given revisions. Omitting the optional set is the
1398 1398 same as passing all(). If a changeset created by these operations is itself
1399 1399 specified as a source for one of these operations, only the source changeset
1400 1400 for the first operation is selected.
1401 1401 """
1402 1402 if x is not None:
1403 1403 dests = getset(repo, fullreposet(repo), x)
1404 1404 else:
1405 1405 dests = fullreposet(repo)
1406 1406
1407 1407 def _firstsrc(rev):
1408 1408 src = _getrevsource(repo, rev)
1409 1409 if src is None:
1410 1410 return None
1411 1411
1412 1412 while True:
1413 1413 prev = _getrevsource(repo, src)
1414 1414
1415 1415 if prev is None:
1416 1416 return src
1417 1417 src = prev
1418 1418
1419 1419 o = {_firstsrc(r) for r in dests}
1420 1420 o -= {None}
1421 1421 # XXX we should turn this into a baseset instead of a set, smartset may do
1422 1422 # some optimizations from the fact this is a baseset.
1423 1423 return subset & o
1424 1424
1425 1425 @predicate('outgoing([path])', safe=False, weight=10)
1426 1426 def outgoing(repo, subset, x):
1427 1427 """Changesets not found in the specified destination repository, or the
1428 1428 default push location.
1429 1429 """
1430 1430 # Avoid cycles.
1431 1431 from . import (
1432 1432 discovery,
1433 1433 hg,
1434 1434 )
1435 1435 # i18n: "outgoing" is a keyword
1436 1436 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1437 1437 # i18n: "outgoing" is a keyword
1438 1438 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1439 1439 if not dest:
1440 1440 # ui.paths.getpath() explicitly tests for None, not just a boolean
1441 1441 dest = None
1442 1442 path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
1443 1443 if not path:
1444 1444 raise error.Abort(_('default repository not configured!'),
1445 1445 hint=_("see 'hg help config.paths'"))
1446 1446 dest = path.pushloc or path.loc
1447 1447 branches = path.branch, []
1448 1448
1449 1449 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1450 1450 if revs:
1451 1451 revs = [repo.lookup(rev) for rev in revs]
1452 1452 other = hg.peer(repo, {}, dest)
1453 1453 repo.ui.pushbuffer()
1454 1454 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1455 1455 repo.ui.popbuffer()
1456 1456 cl = repo.changelog
1457 1457 o = {cl.rev(r) for r in outgoing.missing}
1458 1458 return subset & o
1459 1459
1460 1460 @predicate('p1([set])', safe=True)
1461 1461 def p1(repo, subset, x):
1462 1462 """First parent of changesets in set, or the working directory.
1463 1463 """
1464 1464 if x is None:
1465 1465 p = repo[x].p1().rev()
1466 1466 if p >= 0:
1467 1467 return subset & baseset([p])
1468 1468 return baseset()
1469 1469
1470 1470 ps = set()
1471 1471 cl = repo.changelog
1472 1472 for r in getset(repo, fullreposet(repo), x):
1473 1473 try:
1474 1474 ps.add(cl.parentrevs(r)[0])
1475 1475 except error.WdirUnsupported:
1476 1476 ps.add(repo[r].parents()[0].rev())
1477 1477 ps -= {node.nullrev}
1478 1478 # XXX we should turn this into a baseset instead of a set, smartset may do
1479 1479 # some optimizations from the fact this is a baseset.
1480 1480 return subset & ps
1481 1481
1482 1482 @predicate('p2([set])', safe=True)
1483 1483 def p2(repo, subset, x):
1484 1484 """Second parent of changesets in set, or the working directory.
1485 1485 """
1486 1486 if x is None:
1487 1487 ps = repo[x].parents()
1488 1488 try:
1489 1489 p = ps[1].rev()
1490 1490 if p >= 0:
1491 1491 return subset & baseset([p])
1492 1492 return baseset()
1493 1493 except IndexError:
1494 1494 return baseset()
1495 1495
1496 1496 ps = set()
1497 1497 cl = repo.changelog
1498 1498 for r in getset(repo, fullreposet(repo), x):
1499 1499 try:
1500 1500 ps.add(cl.parentrevs(r)[1])
1501 1501 except error.WdirUnsupported:
1502 1502 parents = repo[r].parents()
1503 1503 if len(parents) == 2:
1504 1504 ps.add(parents[1].rev())
1505 1505 ps -= {node.nullrev}
1506 1506 # XXX we should turn this into a baseset instead of a set, smartset may do
1507 1507 # some optimizations from the fact this is a baseset.
1508 1508 return subset & ps
1509 1509
1510 1510 def parentpost(repo, subset, x, order):
1511 1511 return p1(repo, subset, x)
1512 1512
1513 1513 @predicate('parents([set])', safe=True)
1514 1514 def parents(repo, subset, x):
1515 1515 """
1516 1516 The set of all parents for all changesets in set, or the working directory.
1517 1517 """
1518 1518 if x is None:
1519 1519 ps = set(p.rev() for p in repo[x].parents())
1520 1520 else:
1521 1521 ps = set()
1522 1522 cl = repo.changelog
1523 1523 up = ps.update
1524 1524 parentrevs = cl.parentrevs
1525 1525 for r in getset(repo, fullreposet(repo), x):
1526 1526 try:
1527 1527 up(parentrevs(r))
1528 1528 except error.WdirUnsupported:
1529 1529 up(p.rev() for p in repo[r].parents())
1530 1530 ps -= {node.nullrev}
1531 1531 return subset & ps
1532 1532
1533 1533 def _phase(repo, subset, *targets):
1534 1534 """helper to select all rev in <targets> phases"""
1535 1535 return repo._phasecache.getrevset(repo, targets, subset)
1536 1536
1537 1537 @predicate('draft()', safe=True)
1538 1538 def draft(repo, subset, x):
1539 1539 """Changeset in draft phase."""
1540 1540 # i18n: "draft" is a keyword
1541 1541 getargs(x, 0, 0, _("draft takes no arguments"))
1542 1542 target = phases.draft
1543 1543 return _phase(repo, subset, target)
1544 1544
1545 1545 @predicate('secret()', safe=True)
1546 1546 def secret(repo, subset, x):
1547 1547 """Changeset in secret phase."""
1548 1548 # i18n: "secret" is a keyword
1549 1549 getargs(x, 0, 0, _("secret takes no arguments"))
1550 1550 target = phases.secret
1551 1551 return _phase(repo, subset, target)
1552 1552
1553 1553 @predicate('stack([revs])', safe=True)
1554 1554 def stack(repo, subset, x):
1555 1555 """Experimental revset for the stack of changesets or working directory
1556 1556 parent. (EXPERIMENTAL)
1557 1557 """
1558 1558 if x is None:
1559 1559 stacks = stackmod.getstack(repo, x)
1560 1560 else:
1561 1561 stacks = smartset.baseset([])
1562 1562 for revision in getset(repo, fullreposet(repo), x):
1563 1563 currentstack = stackmod.getstack(repo, revision)
1564 1564 stacks = stacks + currentstack
1565 1565
1566 1566 return subset & stacks
1567 1567
1568 1568 def parentspec(repo, subset, x, n, order):
1569 1569 """``set^0``
1570 1570 The set.
1571 1571 ``set^1`` (or ``set^``), ``set^2``
1572 1572 First or second parent, respectively, of all changesets in set.
1573 1573 """
1574 1574 try:
1575 1575 n = int(n[1])
1576 1576 if n not in (0, 1, 2):
1577 1577 raise ValueError
1578 1578 except (TypeError, ValueError):
1579 1579 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1580 1580 ps = set()
1581 1581 cl = repo.changelog
1582 1582 for r in getset(repo, fullreposet(repo), x):
1583 1583 if n == 0:
1584 1584 ps.add(r)
1585 1585 elif n == 1:
1586 1586 try:
1587 1587 ps.add(cl.parentrevs(r)[0])
1588 1588 except error.WdirUnsupported:
1589 1589 ps.add(repo[r].parents()[0].rev())
1590 1590 else:
1591 1591 try:
1592 1592 parents = cl.parentrevs(r)
1593 1593 if parents[1] != node.nullrev:
1594 1594 ps.add(parents[1])
1595 1595 except error.WdirUnsupported:
1596 1596 parents = repo[r].parents()
1597 1597 if len(parents) == 2:
1598 1598 ps.add(parents[1].rev())
1599 1599 return subset & ps
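
# Editor's example of the ^ operator handled above (illustrative):
#
#   hg log -r "tip^"    # first parent of tip (same as tip^1)
#   hg log -r "tip^2"   # second parent; empty unless tip is a merge
#   hg log -r "tip^0"   # tip itself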
1600 1600
1601 1601 @predicate('present(set)', safe=True, takeorder=True)
1602 1602 def present(repo, subset, x, order):
1603 1603 """An empty set, if any revision in set isn't found; otherwise,
1604 1604 all revisions in set.
1605 1605
1606 1606 If any of the specified revisions is not present in the local repository,
1607 1607 the query is normally aborted. But this predicate allows the query
1608 1608 to continue even in such cases.
1609 1609 """
1610 1610 try:
1611 1611 return getset(repo, subset, x, order)
1612 1612 except error.RepoLookupError:
1613 1613 return baseset()
1614 1614
1615 1615 # for internal use
1616 1616 @predicate('_notpublic', safe=True)
1617 1617 def _notpublic(repo, subset, x):
1618 1618 getargs(x, 0, 0, "_notpublic takes no arguments")
1619 1619 return _phase(repo, subset, phases.draft, phases.secret)
1620 1620
1621 1621 # for internal use
1622 1622 @predicate('_phaseandancestors(phasename, set)', safe=True)
1623 1623 def _phaseandancestors(repo, subset, x):
1624 1624 # equivalent to (phasename() & ancestors(set)) but more efficient
1625 1625 # phasename could be one of 'draft', 'secret', or '_notpublic'
1626 1626 args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
1627 1627 phasename = getsymbol(args[0])
1628 1628 s = getset(repo, fullreposet(repo), args[1])
1629 1629
1630 1630 draft = phases.draft
1631 1631 secret = phases.secret
1632 1632 phasenamemap = {
1633 1633 '_notpublic': draft,
1634 1634 'draft': draft, # follow secret's ancestors
1635 1635 'secret': secret,
1636 1636 }
1637 1637 if phasename not in phasenamemap:
1638 1638 raise error.ParseError('%r is not a valid phasename' % phasename)
1639 1639
1640 1640 minimalphase = phasenamemap[phasename]
1641 1641 getphase = repo._phasecache.phase
1642 1642
1643 1643 def cutfunc(rev):
1644 1644 return getphase(repo, rev) < minimalphase
1645 1645
1646 1646 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
1647 1647
1648 1648 if phasename == 'draft': # need to remove secret changesets
1649 1649 revs = revs.filter(lambda r: getphase(repo, r) == draft)
1650 1650 return subset & revs
1651 1651
1652 1652 @predicate('public()', safe=True)
1653 1653 def public(repo, subset, x):
1654 1654 """Changeset in public phase."""
1655 1655 # i18n: "public" is a keyword
1656 1656 getargs(x, 0, 0, _("public takes no arguments"))
1657 1657 return _phase(repo, subset, phases.public)
1658 1658
1659 1659 @predicate('remote([id [,path]])', safe=False)
1660 1660 def remote(repo, subset, x):
1661 1661 """Local revision that corresponds to the given identifier in a
1662 1662 remote repository, if present. Here, the '.' identifier is a
1663 1663 synonym for the current local branch.
1664 1664 """
1665 1665
1666 1666 from . import hg # avoid start-up nasties
1667 1667 # i18n: "remote" is a keyword
1668 1668 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1669 1669
1670 1670 q = '.'
1671 1671 if len(l) > 0:
1672 1672 # i18n: "remote" is a keyword
1673 1673 q = getstring(l[0], _("remote requires a string id"))
1674 1674 if q == '.':
1675 1675 q = repo['.'].branch()
1676 1676
1677 1677 dest = ''
1678 1678 if len(l) > 1:
1679 1679 # i18n: "remote" is a keyword
1680 1680 dest = getstring(l[1], _("remote requires a repository path"))
1681 1681 dest = repo.ui.expandpath(dest or 'default')
1682 1682 dest, branches = hg.parseurl(dest)
1683 1683 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1684 1684 if revs:
1685 1685 revs = [repo.lookup(rev) for rev in revs]
1686 1686 other = hg.peer(repo, {}, dest)
1687 1687 n = other.lookup(q)
1688 1688 if n in repo:
1689 1689 r = repo[n].rev()
1690 1690 if r in subset:
1691 1691 return baseset([r])
1692 1692 return baseset()
1693 1693
1694 1694 @predicate('removes(pattern)', safe=True, weight=30)
1695 1695 def removes(repo, subset, x):
1696 1696 """Changesets which remove files matching pattern.
1697 1697
1698 1698 The pattern without explicit kind like ``glob:`` is expected to be
1699 1699 relative to the current directory and match against a file or a
1700 1700 directory.
1701 1701 """
1702 1702 # i18n: "removes" is a keyword
1703 1703 pat = getstring(x, _("removes requires a pattern"))
1704 1704 return checkstatus(repo, subset, pat, 2)
1705 1705
1706 1706 @predicate('rev(number)', safe=True)
1707 1707 def rev(repo, subset, x):
1708 1708 """Revision with the given numeric identifier.
1709 1709 """
1710 1710 # i18n: "rev" is a keyword
1711 1711 l = getargs(x, 1, 1, _("rev requires one argument"))
1712 1712 try:
1713 1713 # i18n: "rev" is a keyword
1714 1714 l = int(getstring(l[0], _("rev requires a number")))
1715 1715 except (TypeError, ValueError):
1716 1716 # i18n: "rev" is a keyword
1717 1717 raise error.ParseError(_("rev expects a number"))
1718 1718 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1719 1719 return baseset()
1720 1720 return subset & baseset([l])
1721 1721
1722 1722 @predicate('matching(revision [, field])', safe=True)
1723 1723 def matching(repo, subset, x):
1724 1724 """Changesets in which a given set of fields match the set of fields in the
1725 1725 selected revision or set.
1726 1726
1727 1727 To match more than one field pass the list of fields to match separated
1728 1728 by spaces (e.g. ``author description``).
1729 1729
1730 1730 Valid fields are most regular revision fields and some special fields.
1731 1731
1732 1732 Regular revision fields are ``description``, ``author``, ``branch``,
1733 1733 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1734 1734 and ``diff``.
1735 1735 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1736 1736 diff content of the revision. Two revisions matching on ``diff`` will
1737 1737 also match on ``files``.
1738 1738
1739 1739 Special fields are ``summary`` and ``metadata``:
1740 1740 ``summary`` matches the first line of the description.
1741 1741 ``metadata`` is equivalent to matching ``description user date``
1742 1742 (i.e. it matches the main metadata fields).
1743 1743
1744 1744 ``metadata`` is the default field, which is used when no fields are
1745 1745 specified. You can match more than one field at a time.
1746 1746 """
1747 1747 # i18n: "matching" is a keyword
1748 1748 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1749 1749
1750 1750 revs = getset(repo, fullreposet(repo), l[0])
1751 1751
1752 1752 fieldlist = ['metadata']
1753 1753 if len(l) > 1:
1754 1754 fieldlist = getstring(l[1],
1755 1755 # i18n: "matching" is a keyword
1756 1756 _("matching requires a string "
1757 1757 "as its second argument")).split()
1758 1758
1759 1759 # Make sure that there are no repeated fields,
1760 1760 # expand the 'special' 'metadata' field type
1761 1761 # and check the 'files' whenever we check the 'diff'
1762 1762 fields = []
1763 1763 for field in fieldlist:
1764 1764 if field == 'metadata':
1765 1765 fields += ['user', 'description', 'date']
1766 1766 elif field == 'diff':
1767 1767 # a revision matching the diff must also match the files
1768 1768 # since matching the diff is very costly, make sure to
1769 1769 # also match the files first
1770 1770 fields += ['files', 'diff']
1771 1771 else:
1772 1772 if field == 'author':
1773 1773 field = 'user'
1774 1774 fields.append(field)
1775 1775 fields = set(fields)
1776 1776 if 'summary' in fields and 'description' in fields:
1777 1777 # If a revision matches its description it also matches its summary
1778 1778 fields.discard('summary')
1779 1779
1780 1780 # We may want to match more than one field
1781 1781 # Not all fields take the same amount of time to be matched
1782 1782 # Sort the selected fields in order of increasing matching cost
1783 1783 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1784 1784 'files', 'description', 'substate', 'diff']
1785 1785 def fieldkeyfunc(f):
1786 1786 try:
1787 1787 return fieldorder.index(f)
1788 1788 except ValueError:
1789 1789 # assume an unknown field is very costly
1790 1790 return len(fieldorder)
1791 1791 fields = list(fields)
1792 1792 fields.sort(key=fieldkeyfunc)
1793 1793
1794 1794 # Each field will be matched with its own "getfield" function
1795 1795 # which will be added to the getfieldfuncs array of functions
1796 1796 getfieldfuncs = []
1797 1797 _funcs = {
1798 1798 'user': lambda r: repo[r].user(),
1799 1799 'branch': lambda r: repo[r].branch(),
1800 1800 'date': lambda r: repo[r].date(),
1801 1801 'description': lambda r: repo[r].description(),
1802 1802 'files': lambda r: repo[r].files(),
1803 1803 'parents': lambda r: repo[r].parents(),
1804 1804 'phase': lambda r: repo[r].phase(),
1805 1805 'substate': lambda r: repo[r].substate,
1806 1806 'summary': lambda r: repo[r].description().splitlines()[0],
1807 1807 'diff': lambda r: list(repo[r].diff(
1808 1808 opts=diffutil.diffallopts(repo.ui, {'git': True}))),
1809 1809 }
1810 1810 for info in fields:
1811 1811 getfield = _funcs.get(info, None)
1812 1812 if getfield is None:
1813 1813 raise error.ParseError(
1814 1814 # i18n: "matching" is a keyword
1815 1815 _("unexpected field name passed to matching: %s") % info)
1816 1816 getfieldfuncs.append(getfield)
1817 1817 # convert the getfield array of functions into a "getinfo" function
1818 1818 # which returns an array of field values (or a single value if there
1819 1819 # is only one field to match)
1820 1820 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1821 1821
1822 1822 def matches(x):
1823 1823 for rev in revs:
1824 1824 target = getinfo(rev)
1825 1825 match = True
1826 1826 for n, f in enumerate(getfieldfuncs):
1827 1827 if target[n] != f(x):
1828 1828 match = False
1829 1829 if match:
1830 1830 return True
1831 1831 return False
1832 1832
1833 1833 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
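
# Editor's example (illustrative, not in the original source):
#
#   hg log -r "matching(42)"                    # same user, desc and date as rev 42
#   hg log -r "matching(tip, 'author branch')"  # compare author and branch only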
1834 1834
1835 1835 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1836 1836 def reverse(repo, subset, x, order):
1837 1837 """Reverse order of set.
1838 1838 """
1839 1839 l = getset(repo, subset, x, order)
1840 1840 if order == defineorder:
1841 1841 l.reverse()
1842 1842 return l
1843 1843
1844 1844 @predicate('roots(set)', safe=True)
1845 1845 def roots(repo, subset, x):
1846 1846 """Changesets in set with no parent changeset in set.
1847 1847 """
1848 1848 s = getset(repo, fullreposet(repo), x)
1849 1849 parents = repo.changelog.parentrevs
1850 1850 def filter(r):
1851 1851 for p in parents(r):
1852 1852 if 0 <= p and p in s:
1853 1853 return False
1854 1854 return True
1855 1855 return subset & s.filter(filter, condrepr='<roots>')
1856 1856
1857 1857 _sortkeyfuncs = {
1858 1858 'rev': lambda c: c.rev(),
1859 1859 'branch': lambda c: c.branch(),
1860 1860 'desc': lambda c: c.description(),
1861 1861 'user': lambda c: c.user(),
1862 1862 'author': lambda c: c.user(),
1863 1863 'date': lambda c: c.date()[0],
1864 1864 }
1865 1865
1866 1866 def _getsortargs(x):
1867 1867 """Parse sort options into (set, [(key, reverse)], opts)"""
1868 1868 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1869 1869 if 'set' not in args:
1870 1870 # i18n: "sort" is a keyword
1871 1871 raise error.ParseError(_('sort requires one or two arguments'))
1872 1872 keys = "rev"
1873 1873 if 'keys' in args:
1874 1874 # i18n: "sort" is a keyword
1875 1875 keys = getstring(args['keys'], _("sort spec must be a string"))
1876 1876
1877 1877 keyflags = []
1878 1878 for k in keys.split():
1879 1879 fk = k
1880 1880 reverse = (k.startswith('-'))
1881 1881 if reverse:
1882 1882 k = k[1:]
1883 1883 if k not in _sortkeyfuncs and k != 'topo':
1884 1884 raise error.ParseError(
1885 1885 _("unknown sort key %r") % pycompat.bytestr(fk))
1886 1886 keyflags.append((k, reverse))
1887 1887
1888 1888 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1889 1889 # i18n: "topo" is a keyword
1890 1890 raise error.ParseError(_('topo sort order cannot be combined '
1891 1891 'with other sort keys'))
1892 1892
1893 1893 opts = {}
1894 1894 if 'topo.firstbranch' in args:
1895 1895 if any(k == 'topo' for k, reverse in keyflags):
1896 1896 opts['topo.firstbranch'] = args['topo.firstbranch']
1897 1897 else:
1898 1898 # i18n: "topo" and "topo.firstbranch" are keywords
1899 1899 raise error.ParseError(_('topo.firstbranch can only be used '
1900 1900 'when using the topo sort key'))
1901 1901
1902 1902 return args['set'], keyflags, opts
1903 1903
1904 1904 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
1905 1905 weight=10)
1906 1906 def sort(repo, subset, x, order):
1907 1907 """Sort set by keys. The default sort order is ascending, specify a key
1908 1908 as ``-key`` to sort in descending order.
1909 1909
1910 1910 The keys can be:
1911 1911
1912 1912 - ``rev`` for the revision number,
1913 1913 - ``branch`` for the branch name,
1914 1914 - ``desc`` for the commit message (description),
1915 1915 - ``user`` for user name (``author`` can be used as an alias),
1916 1916 - ``date`` for the commit date
1917 1917 - ``topo`` for a reverse topological sort
1918 1918
1919 1919 The ``topo`` sort order cannot be combined with other sort keys. This sort
1920 1920 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1921 1921 specifies which topological branches to prioritize in the sort.
1922 1922
1923 1923 """
1924 1924 s, keyflags, opts = _getsortargs(x)
1925 1925 revs = getset(repo, subset, s, order)
1926 1926
1927 1927 if not keyflags or order != defineorder:
1928 1928 return revs
1929 1929 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1930 1930 revs.sort(reverse=keyflags[0][1])
1931 1931 return revs
1932 1932 elif keyflags[0][0] == "topo":
1933 1933 firstbranch = ()
1934 1934 if 'topo.firstbranch' in opts:
1935 1935 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1936 1936 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1937 1937 firstbranch),
1938 1938 istopo=True)
1939 1939 if keyflags[0][1]:
1940 1940 revs.reverse()
1941 1941 return revs
1942 1942
1943 1943 # sort() is guaranteed to be stable
1944 1944 ctxs = [repo[r] for r in revs]
1945 1945 for k, reverse in reversed(keyflags):
1946 1946 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1947 1947 return baseset([c.rev() for c in ctxs])
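
# Editor's example (illustrative, not in the original source):
#
#   hg log -r "sort(all(), '-date')"       # newest first
#   hg log -r "sort(all(), 'user -rev')"   # by user, then descending revision
#   hg log -r "sort(all(), 'topo', topo.firstbranch=default)"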
1948 1948
1949 1949 @predicate('subrepo([pattern])')
1950 1950 def subrepo(repo, subset, x):
1951 1951 """Changesets that add, modify or remove the given subrepo. If no subrepo
1952 1952 pattern is named, any subrepo changes are returned.
1953 1953 """
1954 1954 # i18n: "subrepo" is a keyword
1955 1955 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1956 1956 pat = None
1957 1957 if len(args) != 0:
1958 1958 pat = getstring(args[0], _("subrepo requires a pattern"))
1959 1959
1960 1960 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1961 1961
1962 1962 def submatches(names):
1963 1963 k, p, m = stringutil.stringmatcher(pat)
1964 1964 for name in names:
1965 1965 if m(name):
1966 1966 yield name
1967 1967
1968 1968 def matches(x):
1969 1969 c = repo[x]
1970 1970 s = repo.status(c.p1().node(), c.node(), match=m)
1971 1971
1972 1972 if pat is None:
1973 1973 return s.added or s.modified or s.removed
1974 1974
1975 1975 if s.added:
1976 1976 return any(submatches(c.substate.keys()))
1977 1977
1978 1978 if s.modified:
1979 1979 subs = set(c.p1().substate.keys())
1980 1980 subs.update(c.substate.keys())
1981 1981
1982 1982 for path in submatches(subs):
1983 1983 if c.p1().substate.get(path) != c.substate.get(path):
1984 1984 return True
1985 1985
1986 1986 if s.removed:
1987 1987 return any(submatches(c.p1().substate.keys()))
1988 1988
1989 1989 return False
1990 1990
1991 1991 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1992 1992
1993 1993 def _mapbynodefunc(repo, s, f):
1994 1994 """(repo, smartset, [node] -> [node]) -> smartset
1995 1995
1996 1996 Helper method to map a smartset to another smartset given a function only
1997 1997 talking about nodes. Handles converting between rev numbers and nodes, and
1998 1998 filtering.
1999 1999 """
2000 2000 cl = repo.unfiltered().changelog
2001 2001 torev = cl.rev
2002 2002 tonode = cl.node
2003 2003 nodemap = cl.nodemap
2004 2004 result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
2005 2005 return smartset.baseset(result - repo.changelog.filteredrevs)
2006 2006
2007 2007 @predicate('successors(set)', safe=True)
2008 2008 def successors(repo, subset, x):
2009 2009 """All successors for set, including the given set themselves"""
2010 2010 s = getset(repo, fullreposet(repo), x)
2011 2011 f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
2012 2012 d = _mapbynodefunc(repo, s, f)
2013 2013 return subset & d
2014 2014
2015 2015 def _substringmatcher(pattern, casesensitive=True):
2016 2016 kind, pattern, matcher = stringutil.stringmatcher(
2017 2017 pattern, casesensitive=casesensitive)
2018 2018 if kind == 'literal':
2019 2019 if not casesensitive:
2020 2020 pattern = encoding.lower(pattern)
2021 2021 matcher = lambda s: pattern in encoding.lower(s)
2022 2022 else:
2023 2023 matcher = lambda s: pattern in s
2024 2024 return kind, pattern, matcher
2025 2025
2026 2026 @predicate('tag([name])', safe=True)
2027 2027 def tag(repo, subset, x):
2028 2028 """The specified tag by name, or all tagged revisions if no name is given.
2029 2029
2030 2030 Pattern matching is supported for `name`. See
2031 2031 :hg:`help revisions.patterns`.
2032 2032 """
2033 2033 # i18n: "tag" is a keyword
2034 2034 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2035 2035 cl = repo.changelog
2036 2036 if args:
2037 2037 pattern = getstring(args[0],
2038 2038 # i18n: "tag" is a keyword
2039 2039 _('the argument to tag must be a string'))
2040 2040 kind, pattern, matcher = stringutil.stringmatcher(pattern)
2041 2041 if kind == 'literal':
2042 2042 # avoid resolving all tags
2043 2043 tn = repo._tagscache.tags.get(pattern, None)
2044 2044 if tn is None:
2045 2045 raise error.RepoLookupError(_("tag '%s' does not exist")
2046 2046 % pattern)
2047 2047 s = {repo[tn].rev()}
2048 2048 else:
2049 2049 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
2050 2050 else:
2051 2051 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
2052 2052 return subset & s
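
# Editor's example (illustrative, not in the original source):
#
#   hg log -r "tag()"            # every tagged revision
#   hg log -r "tag('1.9')"       # the revision carrying tag 1.9
#   hg log -r "tag('re:^1\.')"   # tags matching a regular expression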
2053 2053
2054 2054 @predicate('tagged', safe=True)
2055 2055 def tagged(repo, subset, x):
2056 2056 return tag(repo, subset, x)
2057 2057
2058 2058 @predicate('orphan()', safe=True)
2059 2059 def orphan(repo, subset, x):
2060 2060 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2061 2061 """
2062 2062 # i18n: "orphan" is a keyword
2063 2063 getargs(x, 0, 0, _("orphan takes no arguments"))
2064 2064 orphan = obsmod.getrevs(repo, 'orphan')
2065 2065 return subset & orphan
2066 2066
2067 2067
2068 2068 @predicate('user(string)', safe=True, weight=10)
2069 2069 def user(repo, subset, x):
2070 2070 """User name contains string. The match is case-insensitive.
2071 2071
2072 2072 Pattern matching is supported for `string`. See
2073 2073 :hg:`help revisions.patterns`.
2074 2074 """
2075 2075 return author(repo, subset, x)
2076 2076
2077 2077 @predicate('wdir()', safe=True, weight=0)
2078 2078 def wdir(repo, subset, x):
2079 2079 """Working directory. (EXPERIMENTAL)"""
2080 2080 # i18n: "wdir" is a keyword
2081 2081 getargs(x, 0, 0, _("wdir takes no arguments"))
2082 2082 if node.wdirrev in subset or isinstance(subset, fullreposet):
2083 2083 return baseset([node.wdirrev])
2084 2084 return baseset()
2085 2085
2086 2086 def _orderedlist(repo, subset, x):
2087 2087 s = getstring(x, "internal error")
2088 2088 if not s:
2089 2089 return baseset()
2090 2090 # remove duplicates here. it's difficult for caller to deduplicate sets
2091 2091 # because different symbols can point to the same rev.
2092 2092 cl = repo.changelog
2093 2093 ls = []
2094 2094 seen = set()
2095 2095 for t in s.split('\0'):
2096 2096 try:
2097 2097 # fast path for integer revision
2098 2098 r = int(t)
2099 2099 if ('%d' % r) != t or r not in cl:
2100 2100 raise ValueError
2101 2101 revs = [r]
2102 2102 except ValueError:
2103 2103 revs = stringset(repo, subset, t, defineorder)
2104 2104
2105 2105 for r in revs:
2106 2106 if r in seen:
2107 2107 continue
2108 2108 if (r in subset
2109 2109 or r == node.nullrev and isinstance(subset, fullreposet)):
2110 2110 ls.append(r)
2111 2111 seen.add(r)
2112 2112 return baseset(ls)
2113 2113
2114 2114 # for internal use
2115 2115 @predicate('_list', safe=True, takeorder=True)
2116 2116 def _list(repo, subset, x, order):
2117 2117 if order == followorder:
2118 2118 # slow path to take the subset order
2119 2119 return subset & _orderedlist(repo, fullreposet(repo), x)
2120 2120 else:
2121 2121 return _orderedlist(repo, subset, x)
2122 2122
2123 2123 def _orderedintlist(repo, subset, x):
2124 2124 s = getstring(x, "internal error")
2125 2125 if not s:
2126 2126 return baseset()
2127 2127 ls = [int(r) for r in s.split('\0')]
2128 2128 s = subset
2129 2129 return baseset([r for r in ls if r in s])
2130 2130
2131 2131 # for internal use
2132 2132 @predicate('_intlist', safe=True, takeorder=True, weight=0)
2133 2133 def _intlist(repo, subset, x, order):
2134 2134 if order == followorder:
2135 2135 # slow path to take the subset order
2136 2136 return subset & _orderedintlist(repo, fullreposet(repo), x)
2137 2137 else:
2138 2138 return _orderedintlist(repo, subset, x)
2139 2139
2140 2140 def _orderedhexlist(repo, subset, x):
2141 2141 s = getstring(x, "internal error")
2142 2142 if not s:
2143 2143 return baseset()
2144 2144 cl = repo.changelog
2145 2145 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2146 2146 s = subset
2147 2147 return baseset([r for r in ls if r in s])
2148 2148
2149 2149 # for internal use
2150 2150 @predicate('_hexlist', safe=True, takeorder=True)
2151 2151 def _hexlist(repo, subset, x, order):
2152 2152 if order == followorder:
2153 2153 # slow path to take the subset order
2154 2154 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2155 2155 else:
2156 2156 return _orderedhexlist(repo, subset, x)
2157 2157
2158 2158 methods = {
2159 2159 "range": rangeset,
2160 2160 "rangeall": rangeall,
2161 2161 "rangepre": rangepre,
2162 2162 "rangepost": rangepost,
2163 2163 "dagrange": dagrange,
2164 2164 "string": stringset,
2165 2165 "symbol": stringset,
2166 2166 "and": andset,
2167 2167 "andsmally": andsmallyset,
2168 2168 "or": orset,
2169 2169 "not": notset,
2170 2170 "difference": differenceset,
2171 2171 "relation": relationset,
2172 2172 "relsubscript": relsubscriptset,
2173 2173 "subscript": subscriptset,
2174 2174 "list": listset,
2175 2175 "keyvalue": keyvaluepair,
2176 2176 "func": func,
2177 2177 "ancestor": ancestorspec,
2178 2178 "parent": parentspec,
2179 2179 "parentpost": parentpost,
2180 2180 }
2181 2181
2182 2182 def lookupfn(repo):
2183 2183 return lambda symbol: scmutil.isrevsymbol(repo, symbol)
2184 2184
2185 2185 def match(ui, spec, lookup=None):
2186 2186 """Create a matcher for a single revision spec"""
2187 2187 return matchany(ui, [spec], lookup=lookup)
2188 2188
2189 2189 def matchany(ui, specs, lookup=None, localalias=None):
2190 2190 """Create a matcher that will include any revisions matching one of the
2191 2191 given specs
2192 2192
2193 2193 If lookup function is not None, the parser will first attempt to handle
2194 2194 old-style ranges, which may contain operator characters.
2195 2195
2196 2196 If localalias is not None, it is a dict {name: definitionstring}. It takes
2197 2197 precedence over [revsetalias] config section.
2198 2198 """
2199 2199 if not specs:
2200 2200 def mfunc(repo, subset=None):
2201 2201 return baseset()
2202 2202 return mfunc
2203 2203 if not all(specs):
2204 2204 raise error.ParseError(_("empty query"))
2205 2205 if len(specs) == 1:
2206 2206 tree = revsetlang.parse(specs[0], lookup)
2207 2207 else:
2208 2208 tree = ('or',
2209 2209 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2210 2210
2211 2211 aliases = []
2212 2212 warn = None
2213 2213 if ui:
2214 2214 aliases.extend(ui.configitems('revsetalias'))
2215 2215 warn = ui.warn
2216 2216 if localalias:
2217 2217 aliases.extend(localalias.items())
2218 2218 if aliases:
2219 2219 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2220 2220 tree = revsetlang.foldconcat(tree)
2221 2221 tree = revsetlang.analyze(tree)
2222 2222 tree = revsetlang.optimize(tree)
2223 2223 return makematcher(tree)
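
# Editor's sketch (illustrative, assuming a loaded `repo` object):
#
#   m = matchany(repo.ui, ['draft()', 'head()'])
#   revs = m(repo)                             # smartset where either spec matches
#   revs = m(repo, subset=repo.revs('all()'))  # constrained to, and ordered by, subset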
2224 2224
2225 2225 def makematcher(tree):
2226 2226 """Create a matcher from an evaluatable tree"""
2227 2227 def mfunc(repo, subset=None, order=None):
2228 2228 if order is None:
2229 2229 if subset is None:
2230 2230 order = defineorder # 'x'
2231 2231 else:
2232 2232 order = followorder # 'subset & x'
2233 2233 if subset is None:
2234 2234 subset = fullreposet(repo)
2235 2235 return getset(repo, subset, tree, order)
2236 2236 return mfunc
2237 2237
2238 2238 def loadpredicate(ui, extname, registrarobj):
2239 2239 """Load revset predicates from specified registrarobj
2240 2240 """
2241 2241 for name, func in registrarobj._table.iteritems():
2242 2242 symbols[name] = func
2243 2243 if func._safe:
2244 2244 safesymbols.add(name)
2245 2245
2246 2246 # load built-in predicates explicitly to setup safesymbols
2247 2247 loadpredicate(None, None, predicate)
2248 2248
2249 2249 # tell hggettext to extract docstrings from these functions:
2250 2250 i18nfunctions = symbols.values()
@@ -1,816 +1,816 b''
1 1 # templatekw.py - common changeset template keywords
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import (
12 12 hex,
13 13 nullid,
14 14 )
15 15
16 16 from . import (
17 diffutil,
17 18 encoding,
18 19 error,
19 20 hbisect,
20 21 i18n,
21 22 obsutil,
22 23 patch,
23 24 pycompat,
24 25 registrar,
25 26 scmutil,
26 27 templateutil,
27 28 util,
28 29 )
29 30 from .utils import (
30 diffutil,
31 31 stringutil,
32 32 )
33 33
34 34 _hybrid = templateutil.hybrid
35 35 hybriddict = templateutil.hybriddict
36 36 hybridlist = templateutil.hybridlist
37 37 compatdict = templateutil.compatdict
38 38 compatlist = templateutil.compatlist
39 39 _showcompatlist = templateutil._showcompatlist
40 40
41 41 def getlatesttags(context, mapping, pattern=None):
42 42 '''return date, distance and name for the latest tag of rev'''
43 43 repo = context.resource(mapping, 'repo')
44 44 ctx = context.resource(mapping, 'ctx')
45 45 cache = context.resource(mapping, 'cache')
46 46
47 47 cachename = 'latesttags'
48 48 if pattern is not None:
49 49 cachename += '-' + pattern
50 50 match = stringutil.stringmatcher(pattern)[2]
51 51 else:
52 52 match = util.always
53 53
54 54 if cachename not in cache:
55 55 # Cache mapping from rev to a tuple with tag date, tag
56 56 # distance and tag name
57 57 cache[cachename] = {-1: (0, 0, ['null'])}
58 58 latesttags = cache[cachename]
59 59
60 60 rev = ctx.rev()
61 61 todo = [rev]
62 62 while todo:
63 63 rev = todo.pop()
64 64 if rev in latesttags:
65 65 continue
66 66 ctx = repo[rev]
67 67 tags = [t for t in ctx.tags()
68 68 if (repo.tagtype(t) and repo.tagtype(t) != 'local'
69 69 and match(t))]
70 70 if tags:
71 71 latesttags[rev] = ctx.date()[0], 0, [t for t in sorted(tags)]
72 72 continue
73 73 try:
74 74 ptags = [latesttags[p.rev()] for p in ctx.parents()]
75 75 if len(ptags) > 1:
76 76 if ptags[0][2] == ptags[1][2]:
77 77 # The tuples are laid out so the right one can be found by
78 78 # comparison in this case.
79 79 pdate, pdist, ptag = max(ptags)
80 80 else:
81 81 def key(x):
82 82 changessincetag = len(repo.revs('only(%d, %s)',
83 83 ctx.rev(), x[2][0]))
84 84 # Smallest number of changes since tag wins. Date is
85 85 # used as tiebreaker.
86 86 return [-changessincetag, x[0]]
87 87 pdate, pdist, ptag = max(ptags, key=key)
88 88 else:
89 89 pdate, pdist, ptag = ptags[0]
90 90 except KeyError:
91 91 # Cache miss - recurse
92 92 todo.append(rev)
93 93 todo.extend(p.rev() for p in ctx.parents())
94 94 continue
95 95 latesttags[rev] = pdate, pdist + 1, ptag
96 96 return latesttags[rev]
97 97
98 98 def getrenamedfn(repo, endrev=None):
99 99 rcache = {}
100 100 if endrev is None:
101 101 endrev = len(repo)
102 102
103 103 def getrenamed(fn, rev):
104 104 '''looks up all renames for a file (up to endrev) the first
105 105 time the file is given. It indexes on the changerev and only
106 106 parses the manifest if linkrev != changerev.
107 107 Returns rename info for fn at changerev rev.'''
108 108 if fn not in rcache:
109 109 rcache[fn] = {}
110 110 fl = repo.file(fn)
111 111 for i in fl:
112 112 lr = fl.linkrev(i)
113 113 renamed = fl.renamed(fl.node(i))
114 114 rcache[fn][lr] = renamed and renamed[0]
115 115 if lr >= endrev:
116 116 break
117 117 if rev in rcache[fn]:
118 118 return rcache[fn][rev]
119 119
120 120 # If linkrev != rev (i.e. rev not found in rcache) fallback to
121 121 # filectx logic.
122 122 try:
123 123 renamed = repo[rev][fn].renamed()
124 124 return renamed and renamed[0]
125 125 except error.LookupError:
126 126 return None
127 127
128 128 return getrenamed
129 129
130 130 def getlogcolumns():
131 131 """Return a dict of log column labels"""
132 132 _ = pycompat.identity # temporarily disable gettext
133 133 # i18n: column positioning for "hg log"
134 134 columns = _('bookmark: %s\n'
135 135 'branch: %s\n'
136 136 'changeset: %s\n'
137 137 'copies: %s\n'
138 138 'date: %s\n'
139 139 'extra: %s=%s\n'
140 140 'files+: %s\n'
141 141 'files-: %s\n'
142 142 'files: %s\n'
143 143 'instability: %s\n'
144 144 'manifest: %s\n'
145 145 'obsolete: %s\n'
146 146 'parent: %s\n'
147 147 'phase: %s\n'
148 148 'summary: %s\n'
149 149 'tag: %s\n'
150 150 'user: %s\n')
151 151 return dict(zip([s.split(':', 1)[0] for s in columns.splitlines()],
152 152 i18n._(columns).splitlines(True)))
153 153
154 154 # default templates internally used for rendering of lists
155 155 defaulttempl = {
156 156 'parent': '{rev}:{node|formatnode} ',
157 157 'manifest': '{rev}:{node|formatnode}',
158 158 'file_copy': '{name} ({source})',
159 159 'envvar': '{key}={value}',
160 160 'extra': '{key}={value|stringescape}'
161 161 }
162 162 # filecopy is preserved for compatibility reasons
163 163 defaulttempl['filecopy'] = defaulttempl['file_copy']
164 164
165 165 # keywords are callables (see registrar.templatekeyword for details)
166 166 keywords = {}
167 167 templatekeyword = registrar.templatekeyword(keywords)
168 168
169 169 @templatekeyword('author', requires={'ctx'})
170 170 def showauthor(context, mapping):
171 171 """String. The unmodified author of the changeset."""
172 172 ctx = context.resource(mapping, 'ctx')
173 173 return ctx.user()
174 174
175 175 @templatekeyword('bisect', requires={'repo', 'ctx'})
176 176 def showbisect(context, mapping):
177 177 """String. The changeset bisection status."""
178 178 repo = context.resource(mapping, 'repo')
179 179 ctx = context.resource(mapping, 'ctx')
180 180 return hbisect.label(repo, ctx.node())
181 181
182 182 @templatekeyword('branch', requires={'ctx'})
183 183 def showbranch(context, mapping):
184 184 """String. The name of the branch on which the changeset was
185 185 committed.
186 186 """
187 187 ctx = context.resource(mapping, 'ctx')
188 188 return ctx.branch()
189 189
190 190 @templatekeyword('branches', requires={'ctx'})
191 191 def showbranches(context, mapping):
192 192 """List of strings. The name of the branch on which the
193 193 changeset was committed. Will be empty if the branch name was
194 194 default. (DEPRECATED)
195 195 """
196 196 ctx = context.resource(mapping, 'ctx')
197 197 branch = ctx.branch()
198 198 if branch != 'default':
199 199 return compatlist(context, mapping, 'branch', [branch],
200 200 plural='branches')
201 201 return compatlist(context, mapping, 'branch', [], plural='branches')
202 202
203 203 @templatekeyword('bookmarks', requires={'repo', 'ctx'})
204 204 def showbookmarks(context, mapping):
205 205 """List of strings. Any bookmarks associated with the
206 206 changeset. Also sets 'active', the name of the active bookmark.
207 207 """
208 208 repo = context.resource(mapping, 'repo')
209 209 ctx = context.resource(mapping, 'ctx')
210 210 bookmarks = ctx.bookmarks()
211 211 active = repo._activebookmark
212 212 makemap = lambda v: {'bookmark': v, 'active': active, 'current': active}
213 213 f = _showcompatlist(context, mapping, 'bookmark', bookmarks)
214 214 return _hybrid(f, bookmarks, makemap, pycompat.identity)
215 215
216 216 @templatekeyword('children', requires={'ctx'})
217 217 def showchildren(context, mapping):
218 218 """List of strings. The children of the changeset."""
219 219 ctx = context.resource(mapping, 'ctx')
220 220 childrevs = ['%d:%s' % (cctx.rev(), cctx) for cctx in ctx.children()]
221 221 return compatlist(context, mapping, 'children', childrevs, element='child')
222 222
223 223 # Deprecated, but kept alive for help generation purposes.
224 224 @templatekeyword('currentbookmark', requires={'repo', 'ctx'})
225 225 def showcurrentbookmark(context, mapping):
226 226 """String. The active bookmark, if it is associated with the changeset.
227 227 (DEPRECATED)"""
228 228 return showactivebookmark(context, mapping)
229 229
230 230 @templatekeyword('activebookmark', requires={'repo', 'ctx'})
231 231 def showactivebookmark(context, mapping):
232 232 """String. The active bookmark, if it is associated with the changeset."""
233 233 repo = context.resource(mapping, 'repo')
234 234 ctx = context.resource(mapping, 'ctx')
235 235 active = repo._activebookmark
236 236 if active and active in ctx.bookmarks():
237 237 return active
238 238 return ''
239 239
240 240 @templatekeyword('date', requires={'ctx'})
241 241 def showdate(context, mapping):
242 242 """Date information. The date when the changeset was committed."""
243 243 ctx = context.resource(mapping, 'ctx')
244 244 # the default string format is '<float(unixtime)><tzoffset>' because
245 245 # python-hglib splits date at decimal separator.
246 246 return templateutil.date(ctx.date(), showfmt='%d.0%d')
247 247
248 248 @templatekeyword('desc', requires={'ctx'})
249 249 def showdescription(context, mapping):
250 250 """String. The text of the changeset description."""
251 251 ctx = context.resource(mapping, 'ctx')
252 252 s = ctx.description()
253 253 if isinstance(s, encoding.localstr):
254 254 # try hard to preserve utf-8 bytes
255 255 return encoding.tolocal(encoding.fromlocal(s).strip())
256 256 elif isinstance(s, encoding.safelocalstr):
257 257 return encoding.safelocalstr(s.strip())
258 258 else:
259 259 return s.strip()
260 260
261 261 @templatekeyword('diffstat', requires={'ui', 'ctx'})
262 262 def showdiffstat(context, mapping):
263 263 """String. Statistics of changes with the following format:
264 264 "modified files: +added/-removed lines"
265 265 """
266 266 ui = context.resource(mapping, 'ui')
267 267 ctx = context.resource(mapping, 'ctx')
268 268 diffopts = diffutil.diffallopts(ui, {'noprefix': False})
269 269 diff = ctx.diff(opts=diffopts)
270 270 stats = patch.diffstatdata(util.iterlines(diff))
271 271 maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
272 272 return '%d: +%d/-%d' % (len(stats), adds, removes)
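
# Editor's example (illustrative, not in the original source):
#
#   hg log -r . -T '{diffstat}\n'   # prints e.g. "3: +12/-4"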
273 273
274 274 @templatekeyword('envvars', requires={'ui'})
275 275 def showenvvars(context, mapping):
276 276 """A dictionary of environment variables. (EXPERIMENTAL)"""
277 277 ui = context.resource(mapping, 'ui')
278 278 env = ui.exportableenviron()
279 279 env = util.sortdict((k, env[k]) for k in sorted(env))
280 280 return compatdict(context, mapping, 'envvar', env, plural='envvars')
281 281
282 282 @templatekeyword('extras', requires={'ctx'})
283 283 def showextras(context, mapping):
284 284 """List of dicts with key, value entries of the 'extras'
285 285 field of this changeset."""
286 286 ctx = context.resource(mapping, 'ctx')
287 287 extras = ctx.extra()
288 288 extras = util.sortdict((k, extras[k]) for k in sorted(extras))
289 289 makemap = lambda k: {'key': k, 'value': extras[k]}
290 290 c = [makemap(k) for k in extras]
291 291 f = _showcompatlist(context, mapping, 'extra', c, plural='extras')
292 292 return _hybrid(f, extras, makemap,
293 293 lambda k: '%s=%s' % (k, stringutil.escapestr(extras[k])))
294 294
295 295 def _showfilesbystat(context, mapping, name, index):
296 296 repo = context.resource(mapping, 'repo')
297 297 ctx = context.resource(mapping, 'ctx')
298 298 revcache = context.resource(mapping, 'revcache')
299 299 if 'files' not in revcache:
300 300 revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
301 301 files = revcache['files'][index]
302 302 return compatlist(context, mapping, name, files, element='file')
303 303
304 304 @templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache'})
305 305 def showfileadds(context, mapping):
306 306 """List of strings. Files added by this changeset."""
307 307 return _showfilesbystat(context, mapping, 'file_add', 1)
308 308
309 309 @templatekeyword('file_copies',
310 310 requires={'repo', 'ctx', 'cache', 'revcache'})
311 311 def showfilecopies(context, mapping):
312 312 """List of strings. Files copied in this changeset with
313 313 their sources.
314 314 """
315 315 repo = context.resource(mapping, 'repo')
316 316 ctx = context.resource(mapping, 'ctx')
317 317 cache = context.resource(mapping, 'cache')
318 318 copies = context.resource(mapping, 'revcache').get('copies')
319 319 if copies is None:
320 320 if 'getrenamed' not in cache:
321 321 cache['getrenamed'] = getrenamedfn(repo)
322 322 copies = []
323 323 getrenamed = cache['getrenamed']
324 324 for fn in ctx.files():
325 325 rename = getrenamed(fn, ctx.rev())
326 326 if rename:
327 327 copies.append((fn, rename))
328 328
329 329 copies = util.sortdict(copies)
330 330 return compatdict(context, mapping, 'file_copy', copies,
331 331 key='name', value='source', fmt='%s (%s)',
332 332 plural='file_copies')
333 333
334 334 # showfilecopiesswitch() displays file copies only if copy records are
335 335 # provided before calling the templater, usually with a --copies
336 336 # command line switch.
337 337 @templatekeyword('file_copies_switch', requires={'revcache'})
338 338 def showfilecopiesswitch(context, mapping):
339 339 """List of strings. Like "file_copies" but displayed
340 340 only if the --copied switch is set.
341 341 """
342 342 copies = context.resource(mapping, 'revcache').get('copies') or []
343 343 copies = util.sortdict(copies)
344 344 return compatdict(context, mapping, 'file_copy', copies,
345 345 key='name', value='source', fmt='%s (%s)',
346 346 plural='file_copies')
347 347
348 348 @templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache'})
349 349 def showfiledels(context, mapping):
350 350 """List of strings. Files removed by this changeset."""
351 351 return _showfilesbystat(context, mapping, 'file_del', 2)
352 352
353 353 @templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache'})
354 354 def showfilemods(context, mapping):
355 355 """List of strings. Files modified by this changeset."""
356 356 return _showfilesbystat(context, mapping, 'file_mod', 0)
357 357
358 358 @templatekeyword('files', requires={'ctx'})
359 359 def showfiles(context, mapping):
360 360 """List of strings. All files modified, added, or removed by this
361 361 changeset.
362 362 """
363 363 ctx = context.resource(mapping, 'ctx')
364 364 return compatlist(context, mapping, 'file', ctx.files())
365 365
366 366 @templatekeyword('graphnode', requires={'repo', 'ctx'})
367 367 def showgraphnode(context, mapping):
368 368 """String. The character representing the changeset node in an ASCII
369 369 revision graph."""
370 370 repo = context.resource(mapping, 'repo')
371 371 ctx = context.resource(mapping, 'ctx')
372 372 return getgraphnode(repo, ctx)
373 373
374 374 def getgraphnode(repo, ctx):
375 375 return getgraphnodecurrent(repo, ctx) or getgraphnodesymbol(ctx)
376 376
377 377 def getgraphnodecurrent(repo, ctx):
378 378 wpnodes = repo.dirstate.parents()
379 379 if wpnodes[1] == nullid:
380 380 wpnodes = wpnodes[:1]
381 381 if ctx.node() in wpnodes:
382 382 return '@'
383 383 else:
384 384 return ''
385 385
386 386 def getgraphnodesymbol(ctx):
387 387 if ctx.obsolete():
388 388 return 'x'
389 389 elif ctx.isunstable():
390 390 return '*'
391 391 elif ctx.closesbranch():
392 392 return '_'
393 393 else:
394 394 return 'o'
395 395
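# In summary, getgraphnode() yields '@' for a working-directory parent,
# 'x' for an obsolete changeset, '*' for an unstable one, '_' for one that
# closes its branch, and 'o' otherwise.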
396 396 @templatekeyword('graphwidth', requires=())
397 397 def showgraphwidth(context, mapping):
398 398 """Integer. The width of the graph drawn by 'log --graph' or zero."""
399 399 # just hosts documentation; should be overridden by template mapping
400 400 return 0
401 401
402 402 @templatekeyword('index', requires=())
403 403 def showindex(context, mapping):
404 404 """Integer. The current iteration of the loop. (0 indexed)"""
405 405 # just hosts documentation; should be overridden by template mapping
406 406 raise error.Abort(_("can't use index in this context"))
407 407
408 408 @templatekeyword('latesttag', requires={'repo', 'ctx', 'cache'})
409 409 def showlatesttag(context, mapping):
410 410 """List of strings. The global tags on the most recent globally
411 411 tagged ancestor of this changeset. If no such tags exist, the list
412 412 consists of the single string "null".
413 413 """
414 414 return showlatesttags(context, mapping, None)
415 415
416 416 def showlatesttags(context, mapping, pattern):
417 417 """helper method for the latesttag keyword and function"""
418 418 latesttags = getlatesttags(context, mapping, pattern)
419 419
420 420 # latesttag[0] is an implementation detail for sorting csets on different
421 421 # branches in a stable manner- it is the date the tagged cset was created,
422 422 # not the date the tag was created. Therefore it isn't made visible here.
423 423 makemap = lambda v: {
424 424 'changes': _showchangessincetag,
425 425 'distance': latesttags[1],
426 426 'latesttag': v, # BC with {latesttag % '{latesttag}'}
427 427 'tag': v
428 428 }
429 429
430 430 tags = latesttags[2]
431 431 f = _showcompatlist(context, mapping, 'latesttag', tags, separator=':')
432 432 return _hybrid(f, tags, makemap, pycompat.identity)
433 433
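# Illustrative usage (a sketch): the makemap above exposes 'tag',
# 'distance' and 'changes' as sub-keywords, so a version-like string can be
# built with:
#
#   hg log -r . -T '{latesttag % "{tag}+{distance}"}\n'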
434 434 @templatekeyword('latesttagdistance', requires={'repo', 'ctx', 'cache'})
435 435 def showlatesttagdistance(context, mapping):
436 436 """Integer. Longest path to the latest tag."""
437 437 return getlatesttags(context, mapping)[1]
438 438
439 439 @templatekeyword('changessincelatesttag', requires={'repo', 'ctx', 'cache'})
440 440 def showchangessincelatesttag(context, mapping):
441 441 """Integer. All ancestors not in the latest tag."""
442 442 tag = getlatesttags(context, mapping)[2][0]
443 443 mapping = context.overlaymap(mapping, {'tag': tag})
444 444 return _showchangessincetag(context, mapping)
445 445
446 446 def _showchangessincetag(context, mapping):
447 447 repo = context.resource(mapping, 'repo')
448 448 ctx = context.resource(mapping, 'ctx')
449 449 offset = 0
450 450 revs = [ctx.rev()]
451 451 tag = context.symbol(mapping, 'tag')
452 452
453 453 # The only() revset doesn't currently support wdir()
454 454 if ctx.rev() is None:
455 455 offset = 1
456 456 revs = [p.rev() for p in ctx.parents()]
457 457
458 458 return len(repo.revs('only(%ld, %s)', revs, tag)) + offset
459 459
460 460 # teach the templater that latesttags.changes uses the (context, mapping) API
461 461 _showchangessincetag._requires = {'repo', 'ctx'}
462 462
463 463 @templatekeyword('manifest', requires={'repo', 'ctx'})
464 464 def showmanifest(context, mapping):
465 465 repo = context.resource(mapping, 'repo')
466 466 ctx = context.resource(mapping, 'ctx')
467 467 mnode = ctx.manifestnode()
468 468 if mnode is None:
469 469 # just avoid a crash; we might want to use the 'ff...' hash in the future
470 470 return
471 471 mrev = repo.manifestlog.rev(mnode)
472 472 mhex = hex(mnode)
473 473 mapping = context.overlaymap(mapping, {'rev': mrev, 'node': mhex})
474 474 f = context.process('manifest', mapping)
475 475 # TODO: perhaps 'ctx' should be dropped from mapping because manifest
476 476 # rev and node are completely different from changeset's.
477 477 return templateutil.hybriditem(f, None, f,
478 478 lambda x: {'rev': mrev, 'node': mhex})
479 479
480 480 @templatekeyword('obsfate', requires={'ui', 'repo', 'ctx'})
481 481 def showobsfate(context, mapping):
482 482 # this function returns a list containing pre-formatted obsfate strings.
483 483 #
484 484 # This function will be replaced by template fragments once the
485 485 # verbosity templatekw is available.
486 486 succsandmarkers = showsuccsandmarkers(context, mapping)
487 487
488 488 ui = context.resource(mapping, 'ui')
489 489 repo = context.resource(mapping, 'repo')
490 490 values = []
491 491
492 492 for x in succsandmarkers.tovalue(context, mapping):
493 493 v = obsutil.obsfateprinter(ui, repo, x['successors'], x['markers'],
494 494 scmutil.formatchangeid)
495 495 values.append(v)
496 496
497 497 return compatlist(context, mapping, "fate", values)
498 498
499 499 def shownames(context, mapping, namespace):
500 500 """helper method to generate a template keyword for a namespace"""
501 501 repo = context.resource(mapping, 'repo')
502 502 ctx = context.resource(mapping, 'ctx')
503 503 ns = repo.names[namespace]
504 504 names = ns.names(repo, ctx.node())
505 505 return compatlist(context, mapping, ns.templatename, names,
506 506 plural=namespace)
507 507
508 508 @templatekeyword('namespaces', requires={'repo', 'ctx'})
509 509 def shownamespaces(context, mapping):
510 510 """Dict of lists. Names attached to this changeset per
511 511 namespace."""
512 512 repo = context.resource(mapping, 'repo')
513 513 ctx = context.resource(mapping, 'ctx')
514 514
515 515 namespaces = util.sortdict()
516 516 def makensmapfn(ns):
517 517 # 'name' for iterating over namespaces, templatename for local reference
518 518 return lambda v: {'name': v, ns.templatename: v}
519 519
520 520 for k, ns in repo.names.iteritems():
521 521 names = ns.names(repo, ctx.node())
522 522 f = _showcompatlist(context, mapping, 'name', names)
523 523 namespaces[k] = _hybrid(f, names, makensmapfn(ns), pycompat.identity)
524 524
525 525 f = _showcompatlist(context, mapping, 'namespace', list(namespaces))
526 526
527 527 def makemap(ns):
528 528 return {
529 529 'namespace': ns,
530 530 'names': namespaces[ns],
531 531 'builtin': repo.names[ns].builtin,
532 532 'colorname': repo.names[ns].colorname,
533 533 }
534 534
535 535 return _hybrid(f, namespaces, makemap, pycompat.identity)
536 536
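# Illustrative usage (a sketch): both levels of the hybrid can be iterated,
# e.g. to print every namespace with the names attached to the changeset:
#
#   hg log -r . -T '{namespaces % "{namespace}: {names}\n"}'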
537 537 @templatekeyword('node', requires={'ctx'})
538 538 def shownode(context, mapping):
539 539 """String. The changeset identification hash, as a 40 hexadecimal
540 540 digit string.
541 541 """
542 542 ctx = context.resource(mapping, 'ctx')
543 543 return ctx.hex()
544 544
545 545 @templatekeyword('obsolete', requires={'ctx'})
546 546 def showobsolete(context, mapping):
547 547 """String. Whether the changeset is obsolete. (EXPERIMENTAL)"""
548 548 ctx = context.resource(mapping, 'ctx')
549 549 if ctx.obsolete():
550 550 return 'obsolete'
551 551 return ''
552 552
553 553 @templatekeyword('peerurls', requires={'repo'})
554 554 def showpeerurls(context, mapping):
555 555 """A dictionary of repository locations defined in the [paths] section
556 556 of your configuration file."""
557 557 repo = context.resource(mapping, 'repo')
558 558 # see commands.paths() for naming of dictionary keys
559 559 paths = repo.ui.paths
560 560 urls = util.sortdict((k, p.rawloc) for k, p in sorted(paths.iteritems()))
561 561 def makemap(k):
562 562 p = paths[k]
563 563 d = {'name': k, 'url': p.rawloc}
564 564 d.update((o, v) for o, v in sorted(p.suboptions.iteritems()))
565 565 return d
566 566 return _hybrid(None, urls, makemap, lambda k: '%s=%s' % (k, urls[k]))
567 567
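# Illustrative usage (a sketch): makemap() also flattens path sub-options
# (pushurl etc.) into the mapping, so the basics can be listed with:
#
#   hg log -r null -T '{peerurls % "{name} = {url}\n"}'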
568 568 @templatekeyword("predecessors", requires={'repo', 'ctx'})
569 569 def showpredecessors(context, mapping):
570 570 """Returns the list if the closest visible successors. (EXPERIMENTAL)"""
571 571 repo = context.resource(mapping, 'repo')
572 572 ctx = context.resource(mapping, 'ctx')
573 573 predecessors = sorted(obsutil.closestpredecessors(repo, ctx.node()))
574 574 predecessors = pycompat.maplist(hex, predecessors)
575 575
576 576 return _hybrid(None, predecessors,
577 577 lambda x: {'ctx': repo[x]},
578 578 lambda x: scmutil.formatchangeid(repo[x]))
579 579
580 580 @templatekeyword('reporoot', requires={'repo'})
581 581 def showreporoot(context, mapping):
582 582 """String. The root directory of the current repository."""
583 583 repo = context.resource(mapping, 'repo')
584 584 return repo.root
585 585
586 586 @templatekeyword("successorssets", requires={'repo', 'ctx'})
587 587 def showsuccessorssets(context, mapping):
588 588 """Returns a string of sets of successors for a changectx. Format used
589 589 is: [ctx1, ctx2], [ctx3] if ctx has been split into ctx1 and ctx2
590 590 while also diverged into ctx3. (EXPERIMENTAL)"""
591 591 repo = context.resource(mapping, 'repo')
592 592 ctx = context.resource(mapping, 'ctx')
593 593 if not ctx.obsolete():
594 594 return ''
595 595
596 596 ssets = obsutil.successorssets(repo, ctx.node(), closest=True)
597 597 ssets = [[hex(n) for n in ss] for ss in ssets]
598 598
599 599 data = []
600 600 for ss in ssets:
601 601 h = _hybrid(None, ss, lambda x: {'ctx': repo[x]},
602 602 lambda x: scmutil.formatchangeid(repo[x]))
603 603 data.append(h)
604 604
605 605 # Format the successorssets
606 606 def render(d):
607 607 return templateutil.stringify(context, mapping, d)
608 608
609 609 def gen(data):
610 610 yield "; ".join(render(d) for d in data)
611 611
612 612 return _hybrid(gen(data), data, lambda x: {'successorset': x},
613 613 pycompat.identity)
614 614
615 615 @templatekeyword("succsandmarkers", requires={'repo', 'ctx'})
616 616 def showsuccsandmarkers(context, mapping):
617 617 """Returns a list of dict for each final successor of ctx. The dict
618 618 contains successors node id in "successors" keys and the list of
619 619 obs-markers from ctx to the set of successors in "markers".
620 620 (EXPERIMENTAL)
621 621 """
622 622 repo = context.resource(mapping, 'repo')
623 623 ctx = context.resource(mapping, 'ctx')
624 624
625 625 values = obsutil.successorsandmarkers(repo, ctx)
626 626
627 627 if values is None:
628 628 values = []
629 629
630 630 # Format successors and markers to avoid exposing binary to templates
631 631 data = []
632 632 for i in values:
633 633 # Format successors
634 634 successors = i['successors']
635 635
636 636 successors = [hex(n) for n in successors]
637 637 successors = _hybrid(None, successors,
638 638 lambda x: {'ctx': repo[x]},
639 639 lambda x: scmutil.formatchangeid(repo[x]))
640 640
641 641 # Format markers
642 642 finalmarkers = []
643 643 for m in i['markers']:
644 644 hexprec = hex(m[0])
645 645 hexsucs = tuple(hex(n) for n in m[1])
646 646 hexparents = None
647 647 if m[5] is not None:
648 648 hexparents = tuple(hex(n) for n in m[5])
649 649 newmarker = (hexprec, hexsucs) + m[2:5] + (hexparents,) + m[6:]
650 650 finalmarkers.append(newmarker)
651 651
652 652 data.append({'successors': successors, 'markers': finalmarkers})
653 653
654 654 return templateutil.mappinglist(data)
655 655
656 656 @templatekeyword('p1rev', requires={'ctx'})
657 657 def showp1rev(context, mapping):
658 658 """Integer. The repository-local revision number of the changeset's
659 659 first parent, or -1 if the changeset has no parents."""
660 660 ctx = context.resource(mapping, 'ctx')
661 661 return ctx.p1().rev()
662 662
663 663 @templatekeyword('p2rev', requires={'ctx'})
664 664 def showp2rev(context, mapping):
665 665 """Integer. The repository-local revision number of the changeset's
666 666 second parent, or -1 if the changeset has no second parent."""
667 667 ctx = context.resource(mapping, 'ctx')
668 668 return ctx.p2().rev()
669 669
670 670 @templatekeyword('p1node', requires={'ctx'})
671 671 def showp1node(context, mapping):
672 672 """String. The identification hash of the changeset's first parent,
673 673 as a 40 digit hexadecimal string. If the changeset has no parents, all
674 674 digits are 0."""
675 675 ctx = context.resource(mapping, 'ctx')
676 676 return ctx.p1().hex()
677 677
678 678 @templatekeyword('p2node', requires={'ctx'})
679 679 def showp2node(context, mapping):
680 680 """String. The identification hash of the changeset's second
681 681 parent, as a 40 digit hexadecimal string. If the changeset has no second
682 682 parent, all digits are 0."""
683 683 ctx = context.resource(mapping, 'ctx')
684 684 return ctx.p2().hex()
685 685
686 686 @templatekeyword('parents', requires={'repo', 'ctx'})
687 687 def showparents(context, mapping):
688 688 """List of strings. The parents of the changeset in "rev:node"
689 689 format. If the changeset has only one "natural" parent (the predecessor
690 690 revision) nothing is shown."""
691 691 repo = context.resource(mapping, 'repo')
692 692 ctx = context.resource(mapping, 'ctx')
693 693 pctxs = scmutil.meaningfulparents(repo, ctx)
694 694 prevs = [p.rev() for p in pctxs]
695 695 parents = [[('rev', p.rev()),
696 696 ('node', p.hex()),
697 697 ('phase', p.phasestr())]
698 698 for p in pctxs]
699 699 f = _showcompatlist(context, mapping, 'parent', parents)
700 700 return _hybrid(f, prevs, lambda x: {'ctx': repo[x]},
701 701 lambda x: scmutil.formatchangeid(repo[x]), keytype=int)
702 702
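# Illustrative usage (a sketch): each parent carries rev/node/phase plus a
# 'ctx' for further dereferencing, so a merge can be rendered as:
#
#   hg log -r 'merge()' -T '{parents % "{rev}:{node|short} ({phase}) "}\n'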
703 703 @templatekeyword('phase', requires={'ctx'})
704 704 def showphase(context, mapping):
705 705 """String. The changeset phase name."""
706 706 ctx = context.resource(mapping, 'ctx')
707 707 return ctx.phasestr()
708 708
709 709 @templatekeyword('phaseidx', requires={'ctx'})
710 710 def showphaseidx(context, mapping):
711 711 """Integer. The changeset phase index. (ADVANCED)"""
712 712 ctx = context.resource(mapping, 'ctx')
713 713 return ctx.phase()
714 714
715 715 @templatekeyword('rev', requires={'ctx'})
716 716 def showrev(context, mapping):
717 717 """Integer. The repository-local changeset revision number."""
718 718 ctx = context.resource(mapping, 'ctx')
719 719 return scmutil.intrev(ctx)
720 720
721 721 def showrevslist(context, mapping, name, revs):
722 722 """helper to generate a list of revisions in which a mapped template will
723 723 be evaluated"""
724 724 repo = context.resource(mapping, 'repo')
725 725 f = _showcompatlist(context, mapping, name, ['%d' % r for r in revs])
726 726 return _hybrid(f, revs,
727 727 lambda x: {name: x, 'ctx': repo[x]},
728 728 pycompat.identity, keytype=int)
729 729
730 730 @templatekeyword('subrepos', requires={'ctx'})
731 731 def showsubrepos(context, mapping):
732 732 """List of strings. Updated subrepositories in the changeset."""
733 733 ctx = context.resource(mapping, 'ctx')
734 734 substate = ctx.substate
735 735 if not substate:
736 736 return compatlist(context, mapping, 'subrepo', [])
737 737 psubstate = ctx.parents()[0].substate or {}
738 738 subrepos = []
739 739 for sub in substate:
740 740 if sub not in psubstate or substate[sub] != psubstate[sub]:
741 741 subrepos.append(sub) # modified or newly added in ctx
742 742 for sub in psubstate:
743 743 if sub not in substate:
744 744 subrepos.append(sub) # removed in ctx
745 745 return compatlist(context, mapping, 'subrepo', sorted(subrepos))
746 746
747 747 # don't remove "showtags" definition, even though namespaces will put
748 748 # a helper function for "tags" keyword into "keywords" map automatically,
749 749 # because online help text is built without namespaces initialization
750 750 @templatekeyword('tags', requires={'repo', 'ctx'})
751 751 def showtags(context, mapping):
752 752 """List of strings. Any tags associated with the changeset."""
753 753 return shownames(context, mapping, 'tags')
754 754
755 755 @templatekeyword('termwidth', requires={'ui'})
756 756 def showtermwidth(context, mapping):
757 757 """Integer. The width of the current terminal."""
758 758 ui = context.resource(mapping, 'ui')
759 759 return ui.termwidth()
760 760
761 761 @templatekeyword('instabilities', requires={'ctx'})
762 762 def showinstabilities(context, mapping):
763 763 """List of strings. Evolution instabilities affecting the changeset.
764 764 (EXPERIMENTAL)
765 765 """
766 766 ctx = context.resource(mapping, 'ctx')
767 767 return compatlist(context, mapping, 'instability', ctx.instabilities(),
768 768 plural='instabilities')
769 769
770 770 @templatekeyword('verbosity', requires={'ui'})
771 771 def showverbosity(context, mapping):
772 772 """String. The current output verbosity in 'debug', 'quiet', 'verbose',
773 773 or ''."""
774 774 ui = context.resource(mapping, 'ui')
775 775 # see logcmdutil.changesettemplater for priority of these flags
776 776 if ui.debugflag:
777 777 return 'debug'
778 778 elif ui.quiet:
779 779 return 'quiet'
780 780 elif ui.verbose:
781 781 return 'verbose'
782 782 return ''
783 783
784 784 @templatekeyword('whyunstable', requires={'repo', 'ctx'})
785 785 def showwhyunstable(context, mapping):
786 786 """List of dicts explaining all instabilities of a changeset.
787 787 (EXPERIMENTAL)
788 788 """
789 789 repo = context.resource(mapping, 'repo')
790 790 ctx = context.resource(mapping, 'ctx')
791 791
792 792 def formatnode(ctx):
793 793 return '%s (%s)' % (scmutil.formatchangeid(ctx), ctx.phasestr())
794 794
795 795 entries = obsutil.whyunstable(repo, ctx)
796 796
797 797 for entry in entries:
798 798 if entry.get('divergentnodes'):
799 799 dnodes = entry['divergentnodes']
800 800 dnhybrid = _hybrid(None, [dnode.hex() for dnode in dnodes],
801 801 lambda x: {'ctx': repo[x]},
802 802 lambda x: formatnode(repo[x]))
803 803 entry['divergentnodes'] = dnhybrid
804 804
805 805 tmpl = ('{instability}:{if(divergentnodes, " ")}{divergentnodes} '
806 806 '{reason} {node|short}')
807 807 return templateutil.mappinglist(entries, tmpl=tmpl, sep='\n')
808 808
809 809 def loadkeyword(ui, extname, registrarobj):
810 810 """Load template keyword from specified registrarobj
811 811 """
812 812 for name, func in registrarobj._table.iteritems():
813 813 keywords[name] = func
814 814
815 815 # tell hggettext to extract docstrings from these functions:
816 816 i18nfunctions = keywords.values()
@@ -1,211 +1,211 b''
1 1 from __future__ import absolute_import, print_function
2 2 import os
3 3 import stat
4 4 import sys
5 5 from mercurial.node import hex
6 6 from mercurial import (
7 7 context,
8 diffutil,
8 9 encoding,
9 10 hg,
10 11 scmutil,
11 12 ui as uimod,
12 13 )
13 from mercurial.utils import diffutil
14 14
15 15 print_ = print
16 16 def print(*args, **kwargs):
17 17 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
18 18
19 19 We could also just write directly to sys.stdout.buffer the way the
20 20 ui object will, but this was easier for porting the test.
21 21 """
22 22 print_(*args, **kwargs)
23 23 sys.stdout.flush()
24 24
25 25 def printb(data, end=b'\n'):
26 26 out = getattr(sys.stdout, 'buffer', sys.stdout)
27 27 out.write(data + end)
28 28 out.flush()
29 29
30 30 u = uimod.ui.load()
31 31
32 32 repo = hg.repository(u, b'test1', create=1)
33 33 os.chdir('test1')
34 34
35 35 # create 'foo' with fixed time stamp
36 36 f = open('foo', 'wb')
37 37 f.write(b'foo\n')
38 38 f.close()
39 39 os.utime('foo', (1000, 1000))
40 40
41 41 # add+commit 'foo'
42 42 repo[None].add([b'foo'])
43 43 repo.commit(text=b'commit1', date=b"0 0")
44 44
45 45 d = repo[None][b'foo'].date()
46 46 if os.name == 'nt':
47 47 d = d[:2]
48 48 print("workingfilectx.date = (%d, %d)" % d)
49 49
50 50 # test memctx with non-ASCII commit message
51 51
52 52 def filectxfn(repo, memctx, path):
53 53 return context.memfilectx(repo, memctx, b"foo", b"")
54 54
55 55 ctx = context.memctx(repo, [b'tip', None],
56 56 encoding.tolocal(b"Gr\xc3\xbcezi!"),
57 57 [b"foo"], filectxfn)
58 58 ctx.commit()
59 59 for enc in "ASCII", "Latin-1", "UTF-8":
60 60 encoding.encoding = enc
61 61 printb(b"%-8s: %s" % (enc.encode('ascii'), repo[b"tip"].description()))
62 62
63 63 # test performing a status
64 64
65 65 def getfilectx(repo, memctx, f):
66 66 fctx = memctx.parents()[0][f]
67 67 data, flags = fctx.data(), fctx.flags()
68 68 if f == b'foo':
69 69 data += b'bar\n'
70 70 return context.memfilectx(
71 71 repo, memctx, f, data, b'l' in flags, b'x' in flags)
72 72
73 73 ctxa = repo[0]
74 74 ctxb = context.memctx(repo, [ctxa.node(), None], b"test diff", [b"foo"],
75 75 getfilectx, ctxa.user(), ctxa.date())
76 76
77 77 print(ctxb.status(ctxa))
78 78
79 79 # test performing a diff on a memctx
80 80 diffopts = diffutil.diffallopts(repo.ui, {'git': True})
81 81 for d in ctxb.diff(ctxa, opts=diffopts):
82 82 printb(d, end=b'')
83 83
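# Note (a sketch, not part of the test): diffallopts() merges the [diff]
# config section with the explicit opts dict, so further knobs can be mixed
# in the same way, e.g.:
#
#   diffopts = diffutil.diffallopts(repo.ui, {'git': True, 'unified': 0})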
84 84 # test safeness and correctness of "ctx.status()"
85 85 print('= checking context.status():')
86 86
87 87 # ancestor "wcctx ~ 2"
88 88 actx2 = repo[b'.']
89 89
90 90 repo.wwrite(b'bar-m', b'bar-m\n', b'')
91 91 repo.wwrite(b'bar-r', b'bar-r\n', b'')
92 92 repo[None].add([b'bar-m', b'bar-r'])
93 93 repo.commit(text=b'add bar-m, bar-r', date=b"0 0")
94 94
95 95 # ancestor "wcctx ~ 1"
96 96 actx1 = repo[b'.']
97 97
98 98 repo.wwrite(b'bar-m', b'bar-m bar-m\n', b'')
99 99 repo.wwrite(b'bar-a', b'bar-a\n', b'')
100 100 repo[None].add([b'bar-a'])
101 101 repo[None].forget([b'bar-r'])
102 102
103 103 # status at this point:
104 104 # M bar-m
105 105 # A bar-a
106 106 # R bar-r
107 107 # C foo
108 108
109 109 from mercurial import scmutil
110 110
111 111 print('== checking workingctx.status:')
112 112
113 113 wctx = repo[None]
114 114 print('wctx._status=%s' % (str(wctx._status)))
115 115
116 116 print('=== with "pattern match":')
117 117 print(actx1.status(other=wctx,
118 118 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
119 119 print('wctx._status=%s' % (str(wctx._status)))
120 120 print(actx2.status(other=wctx,
121 121 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
122 122 print('wctx._status=%s' % (str(wctx._status)))
123 123
124 124 print('=== with "always match" and "listclean=True":')
125 125 print(actx1.status(other=wctx, listclean=True))
126 126 print('wctx._status=%s' % (str(wctx._status)))
127 127 print(actx2.status(other=wctx, listclean=True))
128 128 print('wctx._status=%s' % (str(wctx._status)))
129 129
130 130 print("== checking workingcommitctx.status:")
131 131
132 132 wcctx = context.workingcommitctx(repo,
133 133 scmutil.status([b'bar-m'],
134 134 [b'bar-a'],
135 135 [],
136 136 [], [], [], []),
137 137 text=b'', date=b'0 0')
138 138 print('wcctx._status=%s' % (str(wcctx._status)))
139 139
140 140 print('=== with "always match":')
141 141 print(actx1.status(other=wcctx))
142 142 print('wcctx._status=%s' % (str(wcctx._status)))
143 143 print(actx2.status(other=wcctx))
144 144 print('wcctx._status=%s' % (str(wcctx._status)))
145 145
146 146 print('=== with "always match" and "listclean=True":')
147 147 print(actx1.status(other=wcctx, listclean=True))
148 148 print('wcctx._status=%s' % (str(wcctx._status)))
149 149 print(actx2.status(other=wcctx, listclean=True))
150 150 print('wcctx._status=%s' % (str(wcctx._status)))
151 151
152 152 print('=== with "pattern match":')
153 153 print(actx1.status(other=wcctx,
154 154 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
155 155 print('wcctx._status=%s' % (str(wcctx._status)))
156 156 print(actx2.status(other=wcctx,
157 157 match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
158 158 print('wcctx._status=%s' % (str(wcctx._status)))
159 159
160 160 print('=== with "pattern match" and "listclean=True":')
161 161 print(actx1.status(other=wcctx,
162 162 match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
163 163 listclean=True))
164 164 print('wcctx._status=%s' % (str(wcctx._status)))
165 165 print(actx2.status(other=wcctx,
166 166 match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
167 167 listclean=True))
168 168 print('wcctx._status=%s' % (str(wcctx._status)))
169 169
170 170 os.chdir('..')
171 171
172 172 # test manifestlog being changed
173 173 print('== commit with manifestlog invalidated')
174 174
175 175 repo = hg.repository(u, b'test2', create=1)
176 176 os.chdir('test2')
177 177
178 178 # make some commits
179 179 for i in [b'1', b'2', b'3']:
180 180 with open(i, 'wb') as f:
181 181 f.write(i)
182 182 status = scmutil.status([], [i], [], [], [], [], [])
183 183 ctx = context.workingcommitctx(repo, status, text=i, user=b'test@test.com',
184 184 date=(0, 0))
185 185 ctx.p1().manifest() # side effect: cache manifestctx
186 186 n = repo.commitctx(ctx)
187 187 printb(b'commit %s: %s' % (i, hex(n)))
188 188
189 189 # touch 00manifest.i mtime so the storecache can expire.
190 190 # repo.__dict__['manifestlog'] is deleted by transaction releasefn.
191 191 st = repo.svfs.stat(b'00manifest.i')
192 192 repo.svfs.utime(b'00manifest.i',
193 193 (st[stat.ST_MTIME] + 1, st[stat.ST_MTIME] + 1))
194 194
195 195 # read the file just committed
196 196 try:
197 197 if repo[n][i].data() != i:
198 198 print('data mismatch')
199 199 except Exception as ex:
200 200 print('cannot read data: %r' % ex)
201 201
202 202 with repo.wlock(), repo.lock(), repo.transaction(b'test'):
203 203 with open(b'4', 'wb') as f:
204 204 f.write(b'4')
205 205 repo.dirstate.normal(b'4')
206 206 repo.commit(b'4')
207 207 revsbefore = len(repo.changelog)
208 208 repo.invalidate(clearfilecache=True)
209 209 revsafter = len(repo.changelog)
210 210 if revsbefore != revsafter:
211 211 print('changeset lost by repo.invalidate()')