##// END OF EJS Templates
revset: use delayregistrar to register predicate in extension easily...
FUJIWARA Katsunori -
r27586:42910f9f default
parent child Browse files
Show More
@@ -1,1433 +1,1435 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset, error
16 16 from mercurial.i18n import _
17 17
18 18 import lfutil
19 19 import lfcommands
20 20 import basestore
21 21
22 22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 23
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)

    def islargefile(f):
        # a file is "large" exactly when its standin is in the manifest
        return lfutil.standin(f) in manifest

    m._files = [f for f in m._files if islargefile(f)]
    m._fileroots = set(m._files)
    m._always = False
    basematchfn = m.matchfn
    m.matchfn = lambda f: islargefile(f) and basematchfn(f)
    return m
35 35
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher matching only the non-largefiles of the original
    matcher, optionally also excluding the files in ``exclude``'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)

    def isnormalfile(f):
        # reject standins, tracked largefiles, and explicitly excluded files
        if f in excluded:
            return False
        return not lfutil.isstandin(f) and lfutil.standin(f) not in manifest

    m._files = [f for f in m._files if isnormalfile(f)]
    m._fileroots = set(m._files)
    m._always = False
    basematchfn = m.matchfn
    m.matchfn = lambda f: isnormalfile(f) and basematchfn(f)
    return m
50 50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        # build the stock matcher, then filter the largefiles out of it
        if opts is None:
            opts = {}
        basematch = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(basematch, manifest)
    oldmatch = installmatchfn(overridematch)
60 60
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    previous = scmutil.match
    # remember the replaced function so restorematchfn() can undo this
    f.oldmatch = previous
    scmutil.match = f
    return previous
68 68
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Use the 3-argument getattr so this really is a no-op when nothing
    # is installed (the 2-argument form raises AttributeError, which
    # contradicts the docstring and differs from restorematchandpatsfn).
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
76 76
def installmatchandpatsfn(f):
    '''monkey patch scmutil.matchandpats with ``f``, returning the
    previously installed function so it can be restored later'''
    previous = scmutil.matchandpats
    # stash the old function for restorematchandpatsfn()
    f.oldmatchandpats = previous
    scmutil.matchandpats = f
    return previous
82 82
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    current = scmutil.matchandpats
    # when nothing is installed there is no 'oldmatchandpats' attribute,
    # so fall back to the current function, making this a true no-op
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
92 92
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    """Add the files matched by ``matcher`` as largefiles (standins).

    A file becomes a largefile when --large was given, when it exceeds
    the configured minimum size, or when it matches the
    largefiles.patterns configuration.

    Returns a ``(added, bad)`` pair of lists of file names.
    """
    large = opts.get('large')
    # minimum size (in MB) above which files are auto-added as largefiles
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # walk with a no-op bad-file callback; problems surface via add() later
    for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # empty hash: the real content hash is filled in at commit
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # report rejected standins under their largefile names
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad
167 167
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    """Remove the largefiles matched by ``matcher``.

    Backend for remove/forget/addremove. With --after, only files that
    are already gone from the working directory are dropped. Returns 1
    if any file could not be removed, 0 otherwise.
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only files whose standin is actually tracked
    # NOTE: 'list' shadows the builtin inside this comprehension
    modified, added, deleted, clean = [[f for f in list
                                       if lfutil.standin(f) in manifest]
                                      for list in (s.modified, s.added,
                                                   s.deleted, s.clean)]

    def warn(files, msg):
        # emit one warning per file; returns 1 iff anything was warned about
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        # from here on, operate on the standins rather than the largefiles
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result
238 238
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    """Map a standin path back to its largefile path (hgweb helper)."""
    largefilepath = lfutil.splitstandin(path)
    return largefilepath or path
243 243
244 244 # -- Wrappers: modify existing commands --------------------------------
245 245
def overrideadd(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg add': --normal and --large are mutually exclusive."""
    wantsnormal = opts.get('normal')
    wantslarge = opts.get('large')
    if wantsnormal and wantslarge:
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
250 250
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    """Wrapper for cmdutil.add: add largefiles first, then normal files.

    Returns the combined list of files that could not be added.
    """
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    # exclude the largefiles we just added from the normal-file pass
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    # extend() takes any iterable directly; the original wrapped lbad in a
    # pointless pass-through generator expression
    bad.extend(lbad)
    return bad
263 263
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    """Wrapper for cmdutil.remove handling normal files and largefiles."""
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(ui, repo, normalmatcher, prefix, after, force,
                        subrepos)
    largeresult = removelargefiles(ui, repo, False, matcher, after=after,
                                   force=force)
    return largeresult or normalresult
269 269
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefiles status reporting enabled."""
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        # always restore the flag, even when status raises
        repo._repo.lfstatus = False
276 276
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run 'hg status' with largefiles status reporting enabled."""
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # always restore the flag, even when status raises
        repo.lfstatus = False
283 283
def overridedirty(orig, repo, ignoreupdate=False):
    """Check subrepo dirtiness with largefiles status enabled."""
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        # always restore the flag, even when the dirty check raises
        repo._repo.lfstatus = False
290 290
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg log' that also matches the standins of named files."""
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # filesets match standins on their own; leave them untouched
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            # re-attach an explicit pattern kind ('glob:', 're:', ...)
            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            # relative path from cwd back up to the repo root
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # accept a path when either it, or its largefile name, matches
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # always undo both monkey patches, even if log itself failed
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399 399
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg verify' adding largefile verification.

    --large enables the largefiles check; --lfa verifies all revisions
    and --lfc also verifies file contents.
    """
    large = opts.pop('large', False)
    # renamed from 'all' to avoid shadowing the builtin
    verifyall = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or verifyall or contents:
        result = result or lfcommands.verifylfiles(ui, repo, verifyall,
                                                   contents)
    return result
409 409
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg debugstate': with --large, dump the largefiles
    dirstate instead of the repository's normal dirstate."""
    large = opts.pop('large', False)
    if large:
        # minimal repo-like object whose dirstate is the largefiles
        # dirstate; the class attribute is evaluated right here, at class
        # definition time, which is fine since it is used immediately
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
418 418
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Report no collision for files whose standin is already tracked."""
    standin = lfutil.standin(repo.dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
433 433
434 434 # The manifest merge handles conflicts on the manifest level. We want
435 435 # to handle changes in largefile-ness of files at this level too.
436 436 #
437 437 # The strategy is to run the original calculateupdates and then process
438 438 # the action list it outputs. There are two cases we need to deal with:
439 439 #
440 440 # 1. Normal file in p1, largefile in p2. Here the largefile is
441 441 # detected via its standin file, which will enter the working copy
442 442 # with a "get" action. It is not "merge" since the standin is all
443 443 # Mercurial is concerned with at this level -- the link to the
444 444 # existing normal file is not relevant here.
445 445 #
446 446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
447 447 # since the largefile will be present in the working copy and
448 448 # different from the normal file in p2. Mercurial therefore
449 449 # triggers a merge action.
450 450 #
451 451 # In both cases, we prompt the user and emit new actions to either
452 452 # remove the standin (if the normal file was kept) or to remove the
453 453 # normal file and get the standin (if the largefile was kept). The
454 454 # default prompt answer is to use the largefile version since it was
455 455 # presumably changed on purpose.
456 456 #
457 457 # Finally, the merge.applyupdates function will then take care of
458 458 # writing the files into the working copy and lfcommands.updatelfiles
459 459 # will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, followcopies, matcher=None):
    """Post-process merge actions so largefile/normal-file transitions
    are resolved by prompting the user (see the comment block above)."""
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote,
        followcopies, matcher=matcher)

    if overwrite:
        # plain overwrite: nothing to reconcile, keep the actions as-is
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        # action tuples are (action-code, args, message)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                # rewrite deleted/changed args into plain 'get' args
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(),)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                # rewrite deleted/changed args into plain 'get' args
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(),)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
528 528
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Handle the largefiles-specific 'lfmr' action when recording a merge."""
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, unusedargs, unusedmsg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
541 541
542 542
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    """Merge two largefile standins, prompting only on real conflicts."""
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        # not a largefile, or one side is absent: use the normal filemerge
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # standins contain the largefile hash; compare hashes, not contents
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    # take the other side when it changed and either our side didn't
    # (dhash == ahash) or the user chose (o)ther at the prompt
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
565 565
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Translate standin names in a copies mapping back to largefile names."""
    copies = orig(ctx1, ctx2, match=match)
    translated = {}

    for src, dst in copies.iteritems():
        realsrc = lfutil.splitstandin(src) or src
        realdst = lfutil.splitstandin(dst) or dst
        translated[realsrc] = realdst

    return translated
574 574
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrapper for 'hg copy'/'hg rename' handling largefiles.

    Runs the original command twice: once restricted to normal files
    and once restricted to largefiles (via their standins).
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        # 'no files to copy' is expected here: remember it and continue
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # absolute working-directory path of the standin for relpath
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                default='relpath', badfn=None):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                (f in manifest) and
                                origmatchfn(lfutil.splitstandin(f)) or
                                None)
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                # refuse to clobber an existing largefile unless --force
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        # mirror each copied standin onto the corresponding largefile
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    util.unlinkpath(repo.wjoin(srclfile), True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # both passes found nothing: surface the expected abort after all
    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
712 712
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """Wrapper for 'hg revert' translating largefiles to their standins."""
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # sync standins with the current largefile contents before reverting
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        # snapshot so we can tell which standins revert actually changed
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                default='relpath', badfn=None):
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
789 789
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrapper for 'hg pull' that caches largefiles for --lfrev and
    --all-largefiles after the changesets have been pulled."""
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        # --all-largefiles is sugar for --lfrev "pulled()"
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # pulled() is only meaningful during this pull
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
814 814
815 revsetpredicate = revset.extpredicate()
816
817 @revsetpredicate('pulled()')
815 818 def pulledrevsetsymbol(repo, subset, x):
816 """``pulled()``
817 Changesets that just has been pulled.
819 """Changesets that just has been pulled.
818 820
819 821 Only available with largefiles from pull --lfrev expressions.
820 822
821 823 .. container:: verbose
822 824
823 825 Some examples:
824 826
825 827 - pull largefiles for all new changesets::
826 828
827 829 hg pull -lfrev "pulled()"
828 830
829 831 - pull largefiles for all new branch heads::
830 832
831 833 hg pull -lfrev "head(pulled()) and not closed()"
832 834
833 835 """
834 836
835 837 try:
836 838 firstpulled = repo.firstpulled
837 839 except AttributeError:
838 840 raise error.Abort(_("pulled() only available in --lfrev"))
839 841 return revset.baseset([r for r in subset if r >= firstpulled])
840 842
841 843 def overrideclone(orig, ui, source, dest=None, **opts):
842 844 d = dest
843 845 if d is None:
844 846 d = hg.defaultdest(source)
845 847 if opts.get('all_largefiles') and not hg.islocal(d):
846 848 raise error.Abort(_(
847 849 '--all-largefiles is incompatible with non-local destination %s') %
848 850 d)
849 851
850 852 return orig(ui, source, dest, **opts)
851 853
852 854 def hgclone(orig, ui, opts, *args, **kwargs):
853 855 result = orig(ui, opts, *args, **kwargs)
854 856
855 857 if result is not None:
856 858 sourcerepo, destrepo = result
857 859 repo = destrepo.local()
858 860
859 861 # When cloning to a remote repo (like through SSH), no repo is available
860 862 # from the peer. Therefore the largefiles can't be downloaded and the
861 863 # hgrc can't be updated.
862 864 if not repo:
863 865 return result
864 866
865 867 # If largefiles is required for this repo, permanently enable it locally
866 868 if 'largefiles' in repo.requirements:
867 869 fp = repo.vfs('hgrc', 'a', text=True)
868 870 try:
869 871 fp.write('\n[extensions]\nlargefiles=\n')
870 872 finally:
871 873 fp.close()
872 874
873 875 # Caching is implicitly limited to 'rev' option, since the dest repo was
874 876 # truncated at that point. The user may expect a download count with
875 877 # this option, so attempt whether or not this is a largefile repo.
876 878 if opts.get('all_largefiles'):
877 879 success, missing = lfcommands.downloadlfiles(ui, repo, None)
878 880
879 881 if missing != 0:
880 882 return None
881 883
882 884 return result
883 885
884 886 def overriderebase(orig, ui, repo, **opts):
885 887 if not util.safehasattr(repo, '_largefilesenabled'):
886 888 return orig(ui, repo, **opts)
887 889
888 890 resuming = opts.get('continue')
889 891 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
890 892 repo._lfstatuswriters.append(lambda *msg, **opts: None)
891 893 try:
892 894 return orig(ui, repo, **opts)
893 895 finally:
894 896 repo._lfstatuswriters.pop()
895 897 repo._lfcommithooks.pop()
896 898
897 899 def overridearchivecmd(orig, ui, repo, dest, **opts):
898 900 repo.unfiltered().lfstatus = True
899 901
900 902 try:
901 903 return orig(ui, repo.unfiltered(), dest, **opts)
902 904 finally:
903 905 repo.unfiltered().lfstatus = False
904 906
905 907 def hgwebarchive(orig, web, req, tmpl):
906 908 web.repo.lfstatus = True
907 909
908 910 try:
909 911 return orig(web, req, tmpl)
910 912 finally:
911 913 web.repo.lfstatus = False
912 914
913 915 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
914 916 prefix='', mtime=None, subrepos=None):
915 917 # For some reason setting repo.lfstatus in hgwebarchive only changes the
916 918 # unfiltered repo's attr, so check that as well.
917 919 if not repo.lfstatus and not repo.unfiltered().lfstatus:
918 920 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
919 921 subrepos)
920 922
921 923 # No need to lock because we are only reading history and
922 924 # largefile caches, neither of which are modified.
923 925 if node is not None:
924 926 lfcommands.cachelfiles(repo.ui, repo, node)
925 927
926 928 if kind not in archival.archivers:
927 929 raise error.Abort(_("unknown archive type '%s'") % kind)
928 930
929 931 ctx = repo[node]
930 932
931 933 if kind == 'files':
932 934 if prefix:
933 935 raise error.Abort(
934 936 _('cannot give prefix when archiving to files'))
935 937 else:
936 938 prefix = archival.tidyprefix(dest, kind, prefix)
937 939
938 940 def write(name, mode, islink, getdata):
939 941 if matchfn and not matchfn(name):
940 942 return
941 943 data = getdata()
942 944 if decode:
943 945 data = repo.wwritedata(name, data)
944 946 archiver.addfile(prefix + name, mode, islink, data)
945 947
946 948 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
947 949
948 950 if repo.ui.configbool("ui", "archivemeta", True):
949 951 write('.hg_archival.txt', 0o644, False,
950 952 lambda: archival.buildmetadata(ctx))
951 953
952 954 for f in ctx:
953 955 ff = ctx.flags(f)
954 956 getdata = ctx[f].data
955 957 if lfutil.isstandin(f):
956 958 if node is not None:
957 959 path = lfutil.findfile(repo, getdata().strip())
958 960
959 961 if path is None:
960 962 raise error.Abort(
961 963 _('largefile %s not found in repo store or system cache')
962 964 % lfutil.splitstandin(f))
963 965 else:
964 966 path = lfutil.splitstandin(f)
965 967
966 968 f = lfutil.splitstandin(f)
967 969
968 970 def getdatafn():
969 971 fd = None
970 972 try:
971 973 fd = open(path, 'rb')
972 974 return fd.read()
973 975 finally:
974 976 if fd:
975 977 fd.close()
976 978
977 979 getdata = getdatafn
978 980 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
979 981
980 982 if subrepos:
981 983 for subpath in sorted(ctx.substate):
982 984 sub = ctx.workingsub(subpath)
983 985 submatch = match_.narrowmatcher(subpath, matchfn)
984 986 sub._repo.lfstatus = True
985 987 sub.archive(archiver, prefix, submatch)
986 988
987 989 archiver.done()
988 990
989 991 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
990 992 if not repo._repo.lfstatus:
991 993 return orig(repo, archiver, prefix, match)
992 994
993 995 repo._get(repo._state + ('hg',))
994 996 rev = repo._state[1]
995 997 ctx = repo._repo[rev]
996 998
997 999 if ctx.node() is not None:
998 1000 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
999 1001
1000 1002 def write(name, mode, islink, getdata):
1001 1003 # At this point, the standin has been replaced with the largefile name,
1002 1004 # so the normal matcher works here without the lfutil variants.
1003 1005 if match and not match(f):
1004 1006 return
1005 1007 data = getdata()
1006 1008
1007 1009 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1008 1010
1009 1011 for f in ctx:
1010 1012 ff = ctx.flags(f)
1011 1013 getdata = ctx[f].data
1012 1014 if lfutil.isstandin(f):
1013 1015 if ctx.node() is not None:
1014 1016 path = lfutil.findfile(repo._repo, getdata().strip())
1015 1017
1016 1018 if path is None:
1017 1019 raise error.Abort(
1018 1020 _('largefile %s not found in repo store or system cache')
1019 1021 % lfutil.splitstandin(f))
1020 1022 else:
1021 1023 path = lfutil.splitstandin(f)
1022 1024
1023 1025 f = lfutil.splitstandin(f)
1024 1026
1025 1027 def getdatafn():
1026 1028 fd = None
1027 1029 try:
1028 1030 fd = open(os.path.join(prefix, path), 'rb')
1029 1031 return fd.read()
1030 1032 finally:
1031 1033 if fd:
1032 1034 fd.close()
1033 1035
1034 1036 getdata = getdatafn
1035 1037
1036 1038 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1037 1039
1038 1040 for subpath in sorted(ctx.substate):
1039 1041 sub = ctx.workingsub(subpath)
1040 1042 submatch = match_.narrowmatcher(subpath, match)
1041 1043 sub._repo.lfstatus = True
1042 1044 sub.archive(archiver, prefix + repo._path + '/', submatch)
1043 1045
1044 1046 # If a largefile is modified, the change is not reflected in its
1045 1047 # standin until a commit. cmdutil.bailifchanged() raises an exception
1046 1048 # if the repo has uncommitted changes. Wrap it to also check if
1047 1049 # largefiles were changed. This is used by bisect, backout and fetch.
1048 1050 def overridebailifchanged(orig, repo, *args, **kwargs):
1049 1051 orig(repo, *args, **kwargs)
1050 1052 repo.lfstatus = True
1051 1053 s = repo.status()
1052 1054 repo.lfstatus = False
1053 1055 if s.modified or s.added or s.removed or s.deleted:
1054 1056 raise error.Abort(_('uncommitted changes'))
1055 1057
1056 1058 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1057 1059 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1058 1060 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1059 1061 m = composelargefilematcher(match, repo[None].manifest())
1060 1062
1061 1063 try:
1062 1064 repo.lfstatus = True
1063 1065 s = repo.status(match=m, clean=True)
1064 1066 finally:
1065 1067 repo.lfstatus = False
1066 1068 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1067 1069 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1068 1070
1069 1071 for f in forget:
1070 1072 if lfutil.standin(f) not in repo.dirstate and not \
1071 1073 repo.wvfs.isdir(lfutil.standin(f)):
1072 1074 ui.warn(_('not removing %s: file is already untracked\n')
1073 1075 % m.rel(f))
1074 1076 bad.append(f)
1075 1077
1076 1078 for f in forget:
1077 1079 if ui.verbose or not m.exact(f):
1078 1080 ui.status(_('removing %s\n') % m.rel(f))
1079 1081
1080 1082 # Need to lock because standin files are deleted then removed from the
1081 1083 # repository and we could race in-between.
1082 1084 wlock = repo.wlock()
1083 1085 try:
1084 1086 lfdirstate = lfutil.openlfdirstate(ui, repo)
1085 1087 for f in forget:
1086 1088 if lfdirstate[f] == 'a':
1087 1089 lfdirstate.drop(f)
1088 1090 else:
1089 1091 lfdirstate.remove(f)
1090 1092 lfdirstate.write()
1091 1093 standins = [lfutil.standin(f) for f in forget]
1092 1094 for f in standins:
1093 1095 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1094 1096 rejected = repo[None].forget(standins)
1095 1097 finally:
1096 1098 wlock.release()
1097 1099
1098 1100 bad.extend(f for f in rejected if f in m.files())
1099 1101 forgot.extend(f for f in forget if f not in rejected)
1100 1102 return bad, forgot
1101 1103
1102 1104 def _getoutgoings(repo, other, missing, addfunc):
1103 1105 """get pairs of filename and largefile hash in outgoing revisions
1104 1106 in 'missing'.
1105 1107
1106 1108 largefiles already existing on 'other' repository are ignored.
1107 1109
1108 1110 'addfunc' is invoked with each unique pairs of filename and
1109 1111 largefile hash value.
1110 1112 """
1111 1113 knowns = set()
1112 1114 lfhashes = set()
1113 1115 def dedup(fn, lfhash):
1114 1116 k = (fn, lfhash)
1115 1117 if k not in knowns:
1116 1118 knowns.add(k)
1117 1119 lfhashes.add(lfhash)
1118 1120 lfutil.getlfilestoupload(repo, missing, dedup)
1119 1121 if lfhashes:
1120 1122 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1121 1123 for fn, lfhash in knowns:
1122 1124 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1123 1125 addfunc(fn, lfhash)
1124 1126
1125 1127 def outgoinghook(ui, repo, other, opts, missing):
1126 1128 if opts.pop('large', None):
1127 1129 lfhashes = set()
1128 1130 if ui.debugflag:
1129 1131 toupload = {}
1130 1132 def addfunc(fn, lfhash):
1131 1133 if fn not in toupload:
1132 1134 toupload[fn] = []
1133 1135 toupload[fn].append(lfhash)
1134 1136 lfhashes.add(lfhash)
1135 1137 def showhashes(fn):
1136 1138 for lfhash in sorted(toupload[fn]):
1137 1139 ui.debug(' %s\n' % (lfhash))
1138 1140 else:
1139 1141 toupload = set()
1140 1142 def addfunc(fn, lfhash):
1141 1143 toupload.add(fn)
1142 1144 lfhashes.add(lfhash)
1143 1145 def showhashes(fn):
1144 1146 pass
1145 1147 _getoutgoings(repo, other, missing, addfunc)
1146 1148
1147 1149 if not toupload:
1148 1150 ui.status(_('largefiles: no files to upload\n'))
1149 1151 else:
1150 1152 ui.status(_('largefiles to upload (%d entities):\n')
1151 1153 % (len(lfhashes)))
1152 1154 for file in sorted(toupload):
1153 1155 ui.status(lfutil.splitstandin(file) + '\n')
1154 1156 showhashes(file)
1155 1157 ui.status('\n')
1156 1158
1157 1159 def summaryremotehook(ui, repo, opts, changes):
1158 1160 largeopt = opts.get('large', False)
1159 1161 if changes is None:
1160 1162 if largeopt:
1161 1163 return (False, True) # only outgoing check is needed
1162 1164 else:
1163 1165 return (False, False)
1164 1166 elif largeopt:
1165 1167 url, branch, peer, outgoing = changes[1]
1166 1168 if peer is None:
1167 1169 # i18n: column positioning for "hg summary"
1168 1170 ui.status(_('largefiles: (no remote repo)\n'))
1169 1171 return
1170 1172
1171 1173 toupload = set()
1172 1174 lfhashes = set()
1173 1175 def addfunc(fn, lfhash):
1174 1176 toupload.add(fn)
1175 1177 lfhashes.add(lfhash)
1176 1178 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1177 1179
1178 1180 if not toupload:
1179 1181 # i18n: column positioning for "hg summary"
1180 1182 ui.status(_('largefiles: (no files to upload)\n'))
1181 1183 else:
1182 1184 # i18n: column positioning for "hg summary"
1183 1185 ui.status(_('largefiles: %d entities for %d files to upload\n')
1184 1186 % (len(lfhashes), len(toupload)))
1185 1187
1186 1188 def overridesummary(orig, ui, repo, *pats, **opts):
1187 1189 try:
1188 1190 repo.lfstatus = True
1189 1191 orig(ui, repo, *pats, **opts)
1190 1192 finally:
1191 1193 repo.lfstatus = False
1192 1194
1193 1195 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1194 1196 similarity=None):
1195 1197 if opts is None:
1196 1198 opts = {}
1197 1199 if not lfutil.islfilesrepo(repo):
1198 1200 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1199 1201 # Get the list of missing largefiles so we can remove them
1200 1202 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1201 1203 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1202 1204 False, False, False)
1203 1205
1204 1206 # Call into the normal remove code, but the removing of the standin, we want
1205 1207 # to have handled by original addremove. Monkey patching here makes sure
1206 1208 # we don't remove the standin in the largefiles code, preventing a very
1207 1209 # confused state later.
1208 1210 if s.deleted:
1209 1211 m = copy.copy(matcher)
1210 1212
1211 1213 # The m._files and m._map attributes are not changed to the deleted list
1212 1214 # because that affects the m.exact() test, which in turn governs whether
1213 1215 # or not the file name is printed, and how. Simply limit the original
1214 1216 # matches to those in the deleted status list.
1215 1217 matchfn = m.matchfn
1216 1218 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1217 1219
1218 1220 removelargefiles(repo.ui, repo, True, m, **opts)
1219 1221 # Call into the normal add code, and any files that *should* be added as
1220 1222 # largefiles will be
1221 1223 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1222 1224 # Now that we've handled largefiles, hand off to the original addremove
1223 1225 # function to take care of the rest. Make sure it doesn't do anything with
1224 1226 # largefiles by passing a matcher that will ignore them.
1225 1227 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1226 1228 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1227 1229
1228 1230 # Calling purge with --all will cause the largefiles to be deleted.
1229 1231 # Override repo.status to prevent this from happening.
1230 1232 def overridepurge(orig, ui, repo, *dirs, **opts):
1231 1233 # XXX Monkey patching a repoview will not work. The assigned attribute will
1232 1234 # be set on the unfiltered repo, but we will only lookup attributes in the
1233 1235 # unfiltered repo if the lookup in the repoview object itself fails. As the
1234 1236 # monkey patched method exists on the repoview class the lookup will not
1235 1237 # fail. As a result, the original version will shadow the monkey patched
1236 1238 # one, defeating the monkey patch.
1237 1239 #
1238 1240 # As a work around we use an unfiltered repo here. We should do something
1239 1241 # cleaner instead.
1240 1242 repo = repo.unfiltered()
1241 1243 oldstatus = repo.status
1242 1244 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1243 1245 clean=False, unknown=False, listsubrepos=False):
1244 1246 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1245 1247 listsubrepos)
1246 1248 lfdirstate = lfutil.openlfdirstate(ui, repo)
1247 1249 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1248 1250 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1249 1251 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1250 1252 unknown, ignored, r.clean)
1251 1253 repo.status = overridestatus
1252 1254 orig(ui, repo, *dirs, **opts)
1253 1255 repo.status = oldstatus
1254 1256 def overriderollback(orig, ui, repo, **opts):
1255 1257 wlock = repo.wlock()
1256 1258 try:
1257 1259 before = repo.dirstate.parents()
1258 1260 orphans = set(f for f in repo.dirstate
1259 1261 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1260 1262 result = orig(ui, repo, **opts)
1261 1263 after = repo.dirstate.parents()
1262 1264 if before == after:
1263 1265 return result # no need to restore standins
1264 1266
1265 1267 pctx = repo['.']
1266 1268 for f in repo.dirstate:
1267 1269 if lfutil.isstandin(f):
1268 1270 orphans.discard(f)
1269 1271 if repo.dirstate[f] == 'r':
1270 1272 repo.wvfs.unlinkpath(f, ignoremissing=True)
1271 1273 elif f in pctx:
1272 1274 fctx = pctx[f]
1273 1275 repo.wwrite(f, fctx.data(), fctx.flags())
1274 1276 else:
1275 1277 # content of standin is not so important in 'a',
1276 1278 # 'm' or 'n' (coming from the 2nd parent) cases
1277 1279 lfutil.writestandin(repo, f, '', False)
1278 1280 for standin in orphans:
1279 1281 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1280 1282
1281 1283 lfdirstate = lfutil.openlfdirstate(ui, repo)
1282 1284 orphans = set(lfdirstate)
1283 1285 lfiles = lfutil.listlfiles(repo)
1284 1286 for file in lfiles:
1285 1287 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1286 1288 orphans.discard(file)
1287 1289 for lfile in orphans:
1288 1290 lfdirstate.drop(lfile)
1289 1291 lfdirstate.write()
1290 1292 finally:
1291 1293 wlock.release()
1292 1294 return result
1293 1295
1294 1296 def overridetransplant(orig, ui, repo, *revs, **opts):
1295 1297 resuming = opts.get('continue')
1296 1298 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1297 1299 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1298 1300 try:
1299 1301 result = orig(ui, repo, *revs, **opts)
1300 1302 finally:
1301 1303 repo._lfstatuswriters.pop()
1302 1304 repo._lfcommithooks.pop()
1303 1305 return result
1304 1306
1305 1307 def overridecat(orig, ui, repo, file1, *pats, **opts):
1306 1308 ctx = scmutil.revsingle(repo, opts.get('rev'))
1307 1309 err = 1
1308 1310 notbad = set()
1309 1311 m = scmutil.match(ctx, (file1,) + pats, opts)
1310 1312 origmatchfn = m.matchfn
1311 1313 def lfmatchfn(f):
1312 1314 if origmatchfn(f):
1313 1315 return True
1314 1316 lf = lfutil.splitstandin(f)
1315 1317 if lf is None:
1316 1318 return False
1317 1319 notbad.add(lf)
1318 1320 return origmatchfn(lf)
1319 1321 m.matchfn = lfmatchfn
1320 1322 origbadfn = m.bad
1321 1323 def lfbadfn(f, msg):
1322 1324 if not f in notbad:
1323 1325 origbadfn(f, msg)
1324 1326 m.bad = lfbadfn
1325 1327
1326 1328 origvisitdirfn = m.visitdir
1327 1329 def lfvisitdirfn(dir):
1328 1330 if dir == lfutil.shortname:
1329 1331 return True
1330 1332 ret = origvisitdirfn(dir)
1331 1333 if ret:
1332 1334 return ret
1333 1335 lf = lfutil.splitstandin(dir)
1334 1336 if lf is None:
1335 1337 return False
1336 1338 return origvisitdirfn(lf)
1337 1339 m.visitdir = lfvisitdirfn
1338 1340
1339 1341 for f in ctx.walk(m):
1340 1342 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1341 1343 pathname=f)
1342 1344 lf = lfutil.splitstandin(f)
1343 1345 if lf is None or origmatchfn(f):
1344 1346 # duplicating unreachable code from commands.cat
1345 1347 data = ctx[f].data()
1346 1348 if opts.get('decode'):
1347 1349 data = repo.wwritedata(f, data)
1348 1350 fp.write(data)
1349 1351 else:
1350 1352 hash = lfutil.readstandin(repo, lf, ctx.rev())
1351 1353 if not lfutil.inusercache(repo.ui, hash):
1352 1354 store = basestore._openstore(repo)
1353 1355 success, missing = store.get([(lf, hash)])
1354 1356 if len(success) != 1:
1355 1357 raise error.Abort(
1356 1358 _('largefile %s is not in cache and could not be '
1357 1359 'downloaded') % lf)
1358 1360 path = lfutil.usercachepath(repo.ui, hash)
1359 1361 fpin = open(path, "rb")
1360 1362 for chunk in util.filechunkiter(fpin, 128 * 1024):
1361 1363 fp.write(chunk)
1362 1364 fpin.close()
1363 1365 fp.close()
1364 1366 err = 0
1365 1367 return err
1366 1368
1367 1369 def mergeupdate(orig, repo, node, branchmerge, force,
1368 1370 *args, **kwargs):
1369 1371 matcher = kwargs.get('matcher', None)
1370 1372 # note if this is a partial update
1371 1373 partial = matcher and not matcher.always()
1372 1374 wlock = repo.wlock()
1373 1375 try:
1374 1376 # branch | | |
1375 1377 # merge | force | partial | action
1376 1378 # -------+-------+---------+--------------
1377 1379 # x | x | x | linear-merge
1378 1380 # o | x | x | branch-merge
1379 1381 # x | o | x | overwrite (as clean update)
1380 1382 # o | o | x | force-branch-merge (*1)
1381 1383 # x | x | o | (*)
1382 1384 # o | x | o | (*)
1383 1385 # x | o | o | overwrite (as revert)
1384 1386 # o | o | o | (*)
1385 1387 #
1386 1388 # (*) don't care
1387 1389 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1388 1390
1389 1391 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1390 1392 unsure, s = lfdirstate.status(match_.always(repo.root,
1391 1393 repo.getcwd()),
1392 1394 [], False, False, False)
1393 1395 pctx = repo['.']
1394 1396 for lfile in unsure + s.modified:
1395 1397 lfileabs = repo.wvfs.join(lfile)
1396 1398 if not os.path.exists(lfileabs):
1397 1399 continue
1398 1400 lfhash = lfutil.hashrepofile(repo, lfile)
1399 1401 standin = lfutil.standin(lfile)
1400 1402 lfutil.writestandin(repo, standin, lfhash,
1401 1403 lfutil.getexecutable(lfileabs))
1402 1404 if (standin in pctx and
1403 1405 lfhash == lfutil.readstandin(repo, lfile, '.')):
1404 1406 lfdirstate.normal(lfile)
1405 1407 for lfile in s.added:
1406 1408 lfutil.updatestandin(repo, lfutil.standin(lfile))
1407 1409 lfdirstate.write()
1408 1410
1409 1411 oldstandins = lfutil.getstandinsstate(repo)
1410 1412
1411 1413 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1412 1414
1413 1415 newstandins = lfutil.getstandinsstate(repo)
1414 1416 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1415 1417 if branchmerge or force or partial:
1416 1418 filelist.extend(s.deleted + s.removed)
1417 1419
1418 1420 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1419 1421 normallookup=partial)
1420 1422
1421 1423 return result
1422 1424 finally:
1423 1425 wlock.release()
1424 1426
1425 1427 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1426 1428 result = orig(repo, files, *args, **kwargs)
1427 1429
1428 1430 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1429 1431 if filelist:
1430 1432 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1431 1433 printmessage=False, normallookup=True)
1432 1434
1433 1435 return result
@@ -1,172 +1,173 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles extension: uisetup'''
10 10
11 11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 httppeer, merge, scmutil, sshpeer, wireproto, revset, subrepo, copies
12 httppeer, merge, scmutil, sshpeer, wireproto, subrepo, copies
13 13 from mercurial.i18n import _
14 14 from mercurial.hgweb import hgweb_mod, webcommands
15 15
16 16 import overrides
17 17 import proto
18 18
19 19 def uisetup(ui):
20 20 # Disable auto-status for some commands which assume that all
21 21 # files in the result are under Mercurial's control
22 22
23 23 entry = extensions.wrapcommand(commands.table, 'add',
24 24 overrides.overrideadd)
25 25 addopt = [('', 'large', None, _('add as largefile')),
26 26 ('', 'normal', None, _('add as normal file')),
27 27 ('', 'lfsize', '', _('add all files above this size '
28 28 '(in megabytes) as largefiles '
29 29 '(default: 10)'))]
30 30 entry[1].extend(addopt)
31 31
32 32 # The scmutil function is called both by the (trivial) addremove command,
33 33 # and in the process of handling commit -A (issue3542)
34 34 entry = extensions.wrapfunction(scmutil, 'addremove',
35 35 overrides.scmutiladdremove)
36 36 extensions.wrapfunction(cmdutil, 'add', overrides.cmdutiladd)
37 37 extensions.wrapfunction(cmdutil, 'remove', overrides.cmdutilremove)
38 38 extensions.wrapfunction(cmdutil, 'forget', overrides.cmdutilforget)
39 39
40 40 extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
41 41
42 42 # Subrepos call status function
43 43 entry = extensions.wrapcommand(commands.table, 'status',
44 44 overrides.overridestatus)
45 45 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'status',
46 46 overrides.overridestatusfn)
47 47
48 48 entry = extensions.wrapcommand(commands.table, 'log',
49 49 overrides.overridelog)
50 50 entry = extensions.wrapcommand(commands.table, 'rollback',
51 51 overrides.overriderollback)
52 52 entry = extensions.wrapcommand(commands.table, 'verify',
53 53 overrides.overrideverify)
54 54
55 55 verifyopt = [('', 'large', None,
56 56 _('verify that all largefiles in current revision exists')),
57 57 ('', 'lfa', None,
58 58 _('verify largefiles in all revisions, not just current')),
59 59 ('', 'lfc', None,
60 60 _('verify local largefile contents, not just existence'))]
61 61 entry[1].extend(verifyopt)
62 62
63 63 entry = extensions.wrapcommand(commands.table, 'debugstate',
64 64 overrides.overridedebugstate)
65 65 debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
66 66 entry[1].extend(debugstateopt)
67 67
68 68 outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
69 69 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
70 70 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
71 71 entry[1].extend(outgoingopt)
72 72 cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
73 73 entry = extensions.wrapcommand(commands.table, 'summary',
74 74 overrides.overridesummary)
75 75 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
76 76 entry[1].extend(summaryopt)
77 77 cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
78 78
79 79 entry = extensions.wrapcommand(commands.table, 'pull',
80 80 overrides.overridepull)
81 81 pullopt = [('', 'all-largefiles', None,
82 82 _('download all pulled versions of largefiles (DEPRECATED)')),
83 83 ('', 'lfrev', [],
84 84 _('download largefiles for these revisions'), _('REV'))]
85 85 entry[1].extend(pullopt)
86 revset.symbols['pulled'] = overrides.pulledrevsetsymbol
87 86
88 87 entry = extensions.wrapcommand(commands.table, 'clone',
89 88 overrides.overrideclone)
90 89 cloneopt = [('', 'all-largefiles', None,
91 90 _('download all versions of all largefiles'))]
92 91 entry[1].extend(cloneopt)
93 92 entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)
94 93
95 94 entry = extensions.wrapcommand(commands.table, 'cat',
96 95 overrides.overridecat)
97 96 entry = extensions.wrapfunction(merge, '_checkunknownfile',
98 97 overrides.overridecheckunknownfile)
99 98 entry = extensions.wrapfunction(merge, 'calculateupdates',
100 99 overrides.overridecalculateupdates)
101 100 entry = extensions.wrapfunction(merge, 'recordupdates',
102 101 overrides.mergerecordupdates)
103 102 entry = extensions.wrapfunction(merge, 'update',
104 103 overrides.mergeupdate)
105 104 entry = extensions.wrapfunction(filemerge, '_filemerge',
106 105 overrides.overridefilemerge)
107 106 entry = extensions.wrapfunction(cmdutil, 'copy',
108 107 overrides.overridecopy)
109 108
110 109 # Summary calls dirty on the subrepos
111 110 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'dirty',
112 111 overrides.overridedirty)
113 112
114 113 entry = extensions.wrapfunction(cmdutil, 'revert',
115 114 overrides.overriderevert)
116 115
117 116 extensions.wrapcommand(commands.table, 'archive',
118 117 overrides.overridearchivecmd)
119 118 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
120 119 extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
121 120 overrides.hgsubrepoarchive)
122 121 extensions.wrapfunction(webcommands, 'archive',
123 122 overrides.hgwebarchive)
124 123 extensions.wrapfunction(cmdutil, 'bailifchanged',
125 124 overrides.overridebailifchanged)
126 125
127 126 extensions.wrapfunction(scmutil, 'marktouched',
128 127 overrides.scmutilmarktouched)
129 128
130 129 # create the new wireproto commands ...
131 130 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
132 131 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
133 132 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
134 133
135 134 # ... and wrap some existing ones
136 135 wireproto.commands['capabilities'] = (proto.capabilities, '')
137 136 wireproto.commands['heads'] = (proto.heads, '')
138 137 wireproto.commands['lheads'] = (wireproto.heads, '')
139 138
140 139 # make putlfile behave the same as push and {get,stat}lfile behave
141 140 # the same as pull w.r.t. permissions checks
142 141 hgweb_mod.perms['putlfile'] = 'push'
143 142 hgweb_mod.perms['getlfile'] = 'pull'
144 143 hgweb_mod.perms['statlfile'] = 'pull'
145 144
146 145 extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
147 146
148 147 # the hello wireproto command uses wireproto.capabilities, so it won't see
149 148 # our largefiles capability unless we replace the actual function as well.
150 149 proto.capabilitiesorig = wireproto.capabilities
151 150 wireproto.capabilities = proto.capabilities
152 151
153 152 # can't do this in reposetup because it needs to have happened before
154 153 # wirerepo.__init__ is called
155 154 proto.ssholdcallstream = sshpeer.sshpeer._callstream
156 155 proto.httpoldcallstream = httppeer.httppeer._callstream
157 156 sshpeer.sshpeer._callstream = proto.sshrepocallstream
158 157 httppeer.httppeer._callstream = proto.httprepocallstream
159 158
160 159 # override some extensions' stuff as well
161 160 for name, module in extensions.extensions():
162 161 if name == 'purge':
163 162 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
164 163 overrides.overridepurge)
165 164 if name == 'rebase':
166 165 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
167 166 overrides.overriderebase)
168 167 extensions.wrapfunction(module, 'rebase',
169 168 overrides.overriderebase)
170 169 if name == 'transplant':
171 170 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
172 171 overrides.overridetransplant)
172
173 overrides.revsetpredicate.setup()
@@ -1,3607 +1,3609 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behavior can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 make them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
64 64
65 65 from mercurial.i18n import _
66 66 from mercurial.node import bin, hex, short, nullid, nullrev
67 67 from mercurial.lock import release
68 68 from mercurial import commands, cmdutil, hg, scmutil, util, revset
69 69 from mercurial import extensions, error, phases
70 70 from mercurial import patch as patchmod
71 71 from mercurial import lock as lockmod
72 72 from mercurial import localrepo
73 73 from mercurial import subrepo
74 74 import os, re, errno, shutil
75 75
76 76 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
77 77
78 78 cmdtable = {}
79 79 command = cmdutil.command(cmdtable)
80 80 # Note for extension authors: ONLY specify testedwith = 'internal' for
81 81 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
82 82 # be specifying the version(s) of Mercurial they are tested with, or
83 83 # leave the attribute unspecified.
84 84 testedwith = 'internal'
85 85
86 86 # force load strip extension formerly included in mq and import some utility
87 87 try:
88 88 stripext = extensions.find('strip')
89 89 except KeyError:
90 90 # note: load is lazy so we could avoid the try-except,
91 91 # but I (marmoute) prefer this explicit code.
92 92 class dummyui(object):
93 93 def debug(self, msg):
94 94 pass
95 95 stripext = extensions.load(dummyui(), 'strip', '')
96 96
97 97 strip = stripext.strip
98 98 checksubstate = stripext.checksubstate
99 99 checklocalchanges = stripext.checklocalchanges
100 100
101 101
102 102 # Patch names looks like unix-file names.
103 103 # They must be joinable with queue directory and result in the patch path.
104 104 normname = util.normpath
105 105
106 106 class statusentry(object):
107 107 def __init__(self, node, name):
108 108 self.node, self.name = node, name
109 109 def __repr__(self):
110 110 return hex(self.node) + ':' + self.name
111 111
112 112 # The order of the headers in 'hg export' HG patches:
113 113 HGHEADERS = [
114 114 # '# HG changeset patch',
115 115 '# User ',
116 116 '# Date ',
117 117 '# ',
118 118 '# Branch ',
119 119 '# Node ID ',
120 120 '# Parent ', # can occur twice for merges - but that is not relevant for mq
121 121 ]
122 122 # The order of headers in plain 'mail style' patches:
123 123 PLAINHEADERS = {
124 124 'from': 0,
125 125 'date': 1,
126 126 'subject': 2,
127 127 }
128 128
129 129 def inserthgheader(lines, header, value):
130 130 """Assuming lines contains a HG patch header, add a header line with value.
131 131 >>> try: inserthgheader([], '# Date ', 'z')
132 132 ... except ValueError, inst: print "oops"
133 133 oops
134 134 >>> inserthgheader(['# HG changeset patch'], '# Date ', 'z')
135 135 ['# HG changeset patch', '# Date z']
136 136 >>> inserthgheader(['# HG changeset patch', ''], '# Date ', 'z')
137 137 ['# HG changeset patch', '# Date z', '']
138 138 >>> inserthgheader(['# HG changeset patch', '# User y'], '# Date ', 'z')
139 139 ['# HG changeset patch', '# User y', '# Date z']
140 140 >>> inserthgheader(['# HG changeset patch', '# Date x', '# User y'],
141 141 ... '# User ', 'z')
142 142 ['# HG changeset patch', '# Date x', '# User z']
143 143 >>> inserthgheader(['# HG changeset patch', '# Date y'], '# Date ', 'z')
144 144 ['# HG changeset patch', '# Date z']
145 145 >>> inserthgheader(['# HG changeset patch', '', '# Date y'], '# Date ', 'z')
146 146 ['# HG changeset patch', '# Date z', '', '# Date y']
147 147 >>> inserthgheader(['# HG changeset patch', '# Parent y'], '# Date ', 'z')
148 148 ['# HG changeset patch', '# Date z', '# Parent y']
149 149 """
150 150 start = lines.index('# HG changeset patch') + 1
151 151 newindex = HGHEADERS.index(header)
152 152 bestpos = len(lines)
153 153 for i in range(start, len(lines)):
154 154 line = lines[i]
155 155 if not line.startswith('# '):
156 156 bestpos = min(bestpos, i)
157 157 break
158 158 for lineindex, h in enumerate(HGHEADERS):
159 159 if line.startswith(h):
160 160 if lineindex == newindex:
161 161 lines[i] = header + value
162 162 return lines
163 163 if lineindex > newindex:
164 164 bestpos = min(bestpos, i)
165 165 break # next line
166 166 lines.insert(bestpos, header + value)
167 167 return lines
168 168
169 169 def insertplainheader(lines, header, value):
170 170 """For lines containing a plain patch header, add a header line with value.
171 171 >>> insertplainheader([], 'Date', 'z')
172 172 ['Date: z']
173 173 >>> insertplainheader([''], 'Date', 'z')
174 174 ['Date: z', '']
175 175 >>> insertplainheader(['x'], 'Date', 'z')
176 176 ['Date: z', '', 'x']
177 177 >>> insertplainheader(['From: y', 'x'], 'Date', 'z')
178 178 ['From: y', 'Date: z', '', 'x']
179 179 >>> insertplainheader([' date : x', ' from : y', ''], 'From', 'z')
180 180 [' date : x', 'From: z', '']
181 181 >>> insertplainheader(['', 'Date: y'], 'Date', 'z')
182 182 ['Date: z', '', 'Date: y']
183 183 >>> insertplainheader(['foo: bar', 'DATE: z', 'x'], 'From', 'y')
184 184 ['From: y', 'foo: bar', 'DATE: z', '', 'x']
185 185 """
186 186 newprio = PLAINHEADERS[header.lower()]
187 187 bestpos = len(lines)
188 188 for i, line in enumerate(lines):
189 189 if ':' in line:
190 190 lheader = line.split(':', 1)[0].strip().lower()
191 191 lprio = PLAINHEADERS.get(lheader, newprio + 1)
192 192 if lprio == newprio:
193 193 lines[i] = '%s: %s' % (header, value)
194 194 return lines
195 195 if lprio > newprio and i < bestpos:
196 196 bestpos = i
197 197 else:
198 198 if line:
199 199 lines.insert(i, '')
200 200 if i < bestpos:
201 201 bestpos = i
202 202 break
203 203 lines.insert(bestpos, '%s: %s' % (header, value))
204 204 return lines
205 205
206 206 class patchheader(object):
207 207 def __init__(self, pf, plainmode=False):
208 208 def eatdiff(lines):
209 209 while lines:
210 210 l = lines[-1]
211 211 if (l.startswith("diff -") or
212 212 l.startswith("Index:") or
213 213 l.startswith("===========")):
214 214 del lines[-1]
215 215 else:
216 216 break
217 217 def eatempty(lines):
218 218 while lines:
219 219 if not lines[-1].strip():
220 220 del lines[-1]
221 221 else:
222 222 break
223 223
224 224 message = []
225 225 comments = []
226 226 user = None
227 227 date = None
228 228 parent = None
229 229 format = None
230 230 subject = None
231 231 branch = None
232 232 nodeid = None
233 233 diffstart = 0
234 234
235 235 for line in file(pf):
236 236 line = line.rstrip()
237 237 if (line.startswith('diff --git')
238 238 or (diffstart and line.startswith('+++ '))):
239 239 diffstart = 2
240 240 break
241 241 diffstart = 0 # reset
242 242 if line.startswith("--- "):
243 243 diffstart = 1
244 244 continue
245 245 elif format == "hgpatch":
246 246 # parse values when importing the result of an hg export
247 247 if line.startswith("# User "):
248 248 user = line[7:]
249 249 elif line.startswith("# Date "):
250 250 date = line[7:]
251 251 elif line.startswith("# Parent "):
252 252 parent = line[9:].lstrip() # handle double trailing space
253 253 elif line.startswith("# Branch "):
254 254 branch = line[9:]
255 255 elif line.startswith("# Node ID "):
256 256 nodeid = line[10:]
257 257 elif not line.startswith("# ") and line:
258 258 message.append(line)
259 259 format = None
260 260 elif line == '# HG changeset patch':
261 261 message = []
262 262 format = "hgpatch"
263 263 elif (format != "tagdone" and (line.startswith("Subject: ") or
264 264 line.startswith("subject: "))):
265 265 subject = line[9:]
266 266 format = "tag"
267 267 elif (format != "tagdone" and (line.startswith("From: ") or
268 268 line.startswith("from: "))):
269 269 user = line[6:]
270 270 format = "tag"
271 271 elif (format != "tagdone" and (line.startswith("Date: ") or
272 272 line.startswith("date: "))):
273 273 date = line[6:]
274 274 format = "tag"
275 275 elif format == "tag" and line == "":
276 276 # when looking for tags (subject: from: etc) they
277 277 # end once you find a blank line in the source
278 278 format = "tagdone"
279 279 elif message or line:
280 280 message.append(line)
281 281 comments.append(line)
282 282
283 283 eatdiff(message)
284 284 eatdiff(comments)
285 285 # Remember the exact starting line of the patch diffs before consuming
286 286 # empty lines, for external use by TortoiseHg and others
287 287 self.diffstartline = len(comments)
288 288 eatempty(message)
289 289 eatempty(comments)
290 290
291 291 # make sure message isn't empty
292 292 if format and format.startswith("tag") and subject:
293 293 message.insert(0, subject)
294 294
295 295 self.message = message
296 296 self.comments = comments
297 297 self.user = user
298 298 self.date = date
299 299 self.parent = parent
300 300 # nodeid and branch are for external use by TortoiseHg and others
301 301 self.nodeid = nodeid
302 302 self.branch = branch
303 303 self.haspatch = diffstart > 1
304 304 self.plainmode = (plainmode or
305 305 '# HG changeset patch' not in self.comments and
306 306 any(c.startswith('Date: ') or
307 307 c.startswith('From: ')
308 308 for c in self.comments))
309 309
310 310 def setuser(self, user):
311 311 try:
312 312 inserthgheader(self.comments, '# User ', user)
313 313 except ValueError:
314 314 if self.plainmode:
315 315 insertplainheader(self.comments, 'From', user)
316 316 else:
317 317 tmp = ['# HG changeset patch', '# User ' + user]
318 318 self.comments = tmp + self.comments
319 319 self.user = user
320 320
321 321 def setdate(self, date):
322 322 try:
323 323 inserthgheader(self.comments, '# Date ', date)
324 324 except ValueError:
325 325 if self.plainmode:
326 326 insertplainheader(self.comments, 'Date', date)
327 327 else:
328 328 tmp = ['# HG changeset patch', '# Date ' + date]
329 329 self.comments = tmp + self.comments
330 330 self.date = date
331 331
332 332 def setparent(self, parent):
333 333 try:
334 334 inserthgheader(self.comments, '# Parent ', parent)
335 335 except ValueError:
336 336 if not self.plainmode:
337 337 tmp = ['# HG changeset patch', '# Parent ' + parent]
338 338 self.comments = tmp + self.comments
339 339 self.parent = parent
340 340
341 341 def setmessage(self, message):
342 342 if self.comments:
343 343 self._delmsg()
344 344 self.message = [message]
345 345 if message:
346 346 if self.plainmode and self.comments and self.comments[-1]:
347 347 self.comments.append('')
348 348 self.comments.append(message)
349 349
350 350 def __str__(self):
351 351 s = '\n'.join(self.comments).rstrip()
352 352 if not s:
353 353 return ''
354 354 return s + '\n\n'
355 355
356 356 def _delmsg(self):
357 357 '''Remove existing message, keeping the rest of the comments fields.
358 358 If comments contains 'subject: ', message will prepend
359 359 the field and a blank line.'''
360 360 if self.message:
361 361 subj = 'subject: ' + self.message[0].lower()
362 362 for i in xrange(len(self.comments)):
363 363 if subj == self.comments[i].lower():
364 364 del self.comments[i]
365 365 self.message = self.message[2:]
366 366 break
367 367 ci = 0
368 368 for mi in self.message:
369 369 while mi != self.comments[ci]:
370 370 ci += 1
371 371 del self.comments[ci]
372 372
373 373 def newcommit(repo, phase, *args, **kwargs):
374 374 """helper dedicated to ensure a commit respect mq.secret setting
375 375
376 376 It should be used instead of repo.commit inside the mq source for operation
377 377 creating new changeset.
378 378 """
379 379 repo = repo.unfiltered()
380 380 if phase is None:
381 381 if repo.ui.configbool('mq', 'secret', False):
382 382 phase = phases.secret
383 383 if phase is not None:
384 384 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
385 385 allowemptybackup = repo.ui.backupconfig('ui', 'allowemptycommit')
386 386 try:
387 387 if phase is not None:
388 388 repo.ui.setconfig('phases', 'new-commit', phase, 'mq')
389 389 repo.ui.setconfig('ui', 'allowemptycommit', True)
390 390 return repo.commit(*args, **kwargs)
391 391 finally:
392 392 repo.ui.restoreconfig(allowemptybackup)
393 393 if phase is not None:
394 394 repo.ui.restoreconfig(phasebackup)
395 395
396 396 class AbortNoCleanup(error.Abort):
397 397 pass
398 398
399 399 def makepatchname(existing, title, fallbackname):
400 400 """Return a suitable filename for title, adding a suffix to make
401 401 it unique in the existing list"""
402 402 namebase = re.sub('[\s\W_]+', '_', title.lower()).strip('_')
403 403 if not namebase:
404 404 namebase = fallbackname
405 405 name = namebase
406 406 i = 0
407 407 while name in existing:
408 408 i += 1
409 409 name = '%s__%s' % (namebase, i)
410 410 return name
411 411
412 412 class queue(object):
413 413 def __init__(self, ui, baseui, path, patchdir=None):
414 414 self.basepath = path
415 415 try:
416 416 fh = open(os.path.join(path, 'patches.queue'))
417 417 cur = fh.read().rstrip()
418 418 fh.close()
419 419 if not cur:
420 420 curpath = os.path.join(path, 'patches')
421 421 else:
422 422 curpath = os.path.join(path, 'patches-' + cur)
423 423 except IOError:
424 424 curpath = os.path.join(path, 'patches')
425 425 self.path = patchdir or curpath
426 426 self.opener = scmutil.opener(self.path)
427 427 self.ui = ui
428 428 self.baseui = baseui
429 429 self.applieddirty = False
430 430 self.seriesdirty = False
431 431 self.added = []
432 432 self.seriespath = "series"
433 433 self.statuspath = "status"
434 434 self.guardspath = "guards"
435 435 self.activeguards = None
436 436 self.guardsdirty = False
437 437 # Handle mq.git as a bool with extended values
438 438 try:
439 439 gitmode = ui.configbool('mq', 'git', None)
440 440 if gitmode is None:
441 441 raise error.ConfigError
442 442 if gitmode:
443 443 self.gitmode = 'yes'
444 444 else:
445 445 self.gitmode = 'no'
446 446 except error.ConfigError:
447 447 # let's have check-config ignore the type mismatch
448 448 self.gitmode = ui.config(r'mq', 'git', 'auto').lower()
449 449 # deprecated config: mq.plain
450 450 self.plainmode = ui.configbool('mq', 'plain', False)
451 451 self.checkapplied = True
452 452
453 453 @util.propertycache
454 454 def applied(self):
455 455 def parselines(lines):
456 456 for l in lines:
457 457 entry = l.split(':', 1)
458 458 if len(entry) > 1:
459 459 n, name = entry
460 460 yield statusentry(bin(n), name)
461 461 elif l.strip():
462 462 self.ui.warn(_('malformated mq status line: %s\n') % entry)
463 463 # else we ignore empty lines
464 464 try:
465 465 lines = self.opener.read(self.statuspath).splitlines()
466 466 return list(parselines(lines))
467 467 except IOError as e:
468 468 if e.errno == errno.ENOENT:
469 469 return []
470 470 raise
471 471
472 472 @util.propertycache
473 473 def fullseries(self):
474 474 try:
475 475 return self.opener.read(self.seriespath).splitlines()
476 476 except IOError as e:
477 477 if e.errno == errno.ENOENT:
478 478 return []
479 479 raise
480 480
481 481 @util.propertycache
482 482 def series(self):
483 483 self.parseseries()
484 484 return self.series
485 485
486 486 @util.propertycache
487 487 def seriesguards(self):
488 488 self.parseseries()
489 489 return self.seriesguards
490 490
491 491 def invalidate(self):
492 492 for a in 'applied fullseries series seriesguards'.split():
493 493 if a in self.__dict__:
494 494 delattr(self, a)
495 495 self.applieddirty = False
496 496 self.seriesdirty = False
497 497 self.guardsdirty = False
498 498 self.activeguards = None
499 499
500 500 def diffopts(self, opts=None, patchfn=None):
501 501 diffopts = patchmod.diffopts(self.ui, opts)
502 502 if self.gitmode == 'auto':
503 503 diffopts.upgrade = True
504 504 elif self.gitmode == 'keep':
505 505 pass
506 506 elif self.gitmode in ('yes', 'no'):
507 507 diffopts.git = self.gitmode == 'yes'
508 508 else:
509 509 raise error.Abort(_('mq.git option can be auto/keep/yes/no'
510 510 ' got %s') % self.gitmode)
511 511 if patchfn:
512 512 diffopts = self.patchopts(diffopts, patchfn)
513 513 return diffopts
514 514
515 515 def patchopts(self, diffopts, *patches):
516 516 """Return a copy of input diff options with git set to true if
517 517 referenced patch is a git patch and should be preserved as such.
518 518 """
519 519 diffopts = diffopts.copy()
520 520 if not diffopts.git and self.gitmode == 'keep':
521 521 for patchfn in patches:
522 522 patchf = self.opener(patchfn, 'r')
523 523 # if the patch was a git patch, refresh it as a git patch
524 524 for line in patchf:
525 525 if line.startswith('diff --git'):
526 526 diffopts.git = True
527 527 break
528 528 patchf.close()
529 529 return diffopts
530 530
531 531 def join(self, *p):
532 532 return os.path.join(self.path, *p)
533 533
534 534 def findseries(self, patch):
535 535 def matchpatch(l):
536 536 l = l.split('#', 1)[0]
537 537 return l.strip() == patch
538 538 for index, l in enumerate(self.fullseries):
539 539 if matchpatch(l):
540 540 return index
541 541 return None
542 542
543 543 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
544 544
545 545 def parseseries(self):
546 546 self.series = []
547 547 self.seriesguards = []
548 548 for l in self.fullseries:
549 549 h = l.find('#')
550 550 if h == -1:
551 551 patch = l
552 552 comment = ''
553 553 elif h == 0:
554 554 continue
555 555 else:
556 556 patch = l[:h]
557 557 comment = l[h:]
558 558 patch = patch.strip()
559 559 if patch:
560 560 if patch in self.series:
561 561 raise error.Abort(_('%s appears more than once in %s') %
562 562 (patch, self.join(self.seriespath)))
563 563 self.series.append(patch)
564 564 self.seriesguards.append(self.guard_re.findall(comment))
565 565
566 566 def checkguard(self, guard):
567 567 if not guard:
568 568 return _('guard cannot be an empty string')
569 569 bad_chars = '# \t\r\n\f'
570 570 first = guard[0]
571 571 if first in '-+':
572 572 return (_('guard %r starts with invalid character: %r') %
573 573 (guard, first))
574 574 for c in bad_chars:
575 575 if c in guard:
576 576 return _('invalid character in guard %r: %r') % (guard, c)
577 577
578 578 def setactive(self, guards):
579 579 for guard in guards:
580 580 bad = self.checkguard(guard)
581 581 if bad:
582 582 raise error.Abort(bad)
583 583 guards = sorted(set(guards))
584 584 self.ui.debug('active guards: %s\n' % ' '.join(guards))
585 585 self.activeguards = guards
586 586 self.guardsdirty = True
587 587
588 588 def active(self):
589 589 if self.activeguards is None:
590 590 self.activeguards = []
591 591 try:
592 592 guards = self.opener.read(self.guardspath).split()
593 593 except IOError as err:
594 594 if err.errno != errno.ENOENT:
595 595 raise
596 596 guards = []
597 597 for i, guard in enumerate(guards):
598 598 bad = self.checkguard(guard)
599 599 if bad:
600 600 self.ui.warn('%s:%d: %s\n' %
601 601 (self.join(self.guardspath), i + 1, bad))
602 602 else:
603 603 self.activeguards.append(guard)
604 604 return self.activeguards
605 605
606 606 def setguards(self, idx, guards):
607 607 for g in guards:
608 608 if len(g) < 2:
609 609 raise error.Abort(_('guard %r too short') % g)
610 610 if g[0] not in '-+':
611 611 raise error.Abort(_('guard %r starts with invalid char') % g)
612 612 bad = self.checkguard(g[1:])
613 613 if bad:
614 614 raise error.Abort(bad)
615 615 drop = self.guard_re.sub('', self.fullseries[idx])
616 616 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
617 617 self.parseseries()
618 618 self.seriesdirty = True
619 619
620 620 def pushable(self, idx):
621 621 if isinstance(idx, str):
622 622 idx = self.series.index(idx)
623 623 patchguards = self.seriesguards[idx]
624 624 if not patchguards:
625 625 return True, None
626 626 guards = self.active()
627 627 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
628 628 if exactneg:
629 629 return False, repr(exactneg[0])
630 630 pos = [g for g in patchguards if g[0] == '+']
631 631 exactpos = [g for g in pos if g[1:] in guards]
632 632 if pos:
633 633 if exactpos:
634 634 return True, repr(exactpos[0])
635 635 return False, ' '.join(map(repr, pos))
636 636 return True, ''
637 637
638 638 def explainpushable(self, idx, all_patches=False):
639 639 if all_patches:
640 640 write = self.ui.write
641 641 else:
642 642 write = self.ui.warn
643 643
644 644 if all_patches or self.ui.verbose:
645 645 if isinstance(idx, str):
646 646 idx = self.series.index(idx)
647 647 pushable, why = self.pushable(idx)
648 648 if all_patches and pushable:
649 649 if why is None:
650 650 write(_('allowing %s - no guards in effect\n') %
651 651 self.series[idx])
652 652 else:
653 653 if not why:
654 654 write(_('allowing %s - no matching negative guards\n') %
655 655 self.series[idx])
656 656 else:
657 657 write(_('allowing %s - guarded by %s\n') %
658 658 (self.series[idx], why))
659 659 if not pushable:
660 660 if why:
661 661 write(_('skipping %s - guarded by %s\n') %
662 662 (self.series[idx], why))
663 663 else:
664 664 write(_('skipping %s - no matching guards\n') %
665 665 self.series[idx])
666 666
667 667 def savedirty(self):
668 668 def writelist(items, path):
669 669 fp = self.opener(path, 'w')
670 670 for i in items:
671 671 fp.write("%s\n" % i)
672 672 fp.close()
673 673 if self.applieddirty:
674 674 writelist(map(str, self.applied), self.statuspath)
675 675 self.applieddirty = False
676 676 if self.seriesdirty:
677 677 writelist(self.fullseries, self.seriespath)
678 678 self.seriesdirty = False
679 679 if self.guardsdirty:
680 680 writelist(self.activeguards, self.guardspath)
681 681 self.guardsdirty = False
682 682 if self.added:
683 683 qrepo = self.qrepo()
684 684 if qrepo:
685 685 qrepo[None].add(f for f in self.added if f not in qrepo[None])
686 686 self.added = []
687 687
688 688 def removeundo(self, repo):
689 689 undo = repo.sjoin('undo')
690 690 if not os.path.exists(undo):
691 691 return
692 692 try:
693 693 os.unlink(undo)
694 694 except OSError as inst:
695 695 self.ui.warn(_('error removing undo: %s\n') % str(inst))
696 696
697 697 def backup(self, repo, files, copy=False):
698 698 # backup local changes in --force case
699 699 for f in sorted(files):
700 700 absf = repo.wjoin(f)
701 701 if os.path.lexists(absf):
702 702 self.ui.note(_('saving current version of %s as %s\n') %
703 703 (f, cmdutil.origpath(self.ui, repo, f)))
704 704
705 705 absorig = cmdutil.origpath(self.ui, repo, absf)
706 706 if copy:
707 707 util.copyfile(absf, absorig)
708 708 else:
709 709 util.rename(absf, absorig)
710 710
711 711 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
712 712 fp=None, changes=None, opts={}):
713 713 stat = opts.get('stat')
714 714 m = scmutil.match(repo[node1], files, opts)
715 715 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
716 716 changes, stat, fp)
717 717
718 718 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
719 719 # first try just applying the patch
720 720 (err, n) = self.apply(repo, [patch], update_status=False,
721 721 strict=True, merge=rev)
722 722
723 723 if err == 0:
724 724 return (err, n)
725 725
726 726 if n is None:
727 727 raise error.Abort(_("apply failed for patch %s") % patch)
728 728
729 729 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
730 730
731 731 # apply failed, strip away that rev and merge.
732 732 hg.clean(repo, head)
733 733 strip(self.ui, repo, [n], update=False, backup=False)
734 734
735 735 ctx = repo[rev]
736 736 ret = hg.merge(repo, rev)
737 737 if ret:
738 738 raise error.Abort(_("update returned %d") % ret)
739 739 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
740 740 if n is None:
741 741 raise error.Abort(_("repo commit failed"))
742 742 try:
743 743 ph = patchheader(mergeq.join(patch), self.plainmode)
744 744 except Exception:
745 745 raise error.Abort(_("unable to read %s") % patch)
746 746
747 747 diffopts = self.patchopts(diffopts, patch)
748 748 patchf = self.opener(patch, "w")
749 749 comments = str(ph)
750 750 if comments:
751 751 patchf.write(comments)
752 752 self.printdiff(repo, diffopts, head, n, fp=patchf)
753 753 patchf.close()
754 754 self.removeundo(repo)
755 755 return (0, n)
756 756
757 757 def qparents(self, repo, rev=None):
758 758 """return the mq handled parent or p1
759 759
760 760 In some case where mq get himself in being the parent of a merge the
761 761 appropriate parent may be p2.
762 762 (eg: an in progress merge started with mq disabled)
763 763
764 764 If no parent are managed by mq, p1 is returned.
765 765 """
766 766 if rev is None:
767 767 (p1, p2) = repo.dirstate.parents()
768 768 if p2 == nullid:
769 769 return p1
770 770 if not self.applied:
771 771 return None
772 772 return self.applied[-1].node
773 773 p1, p2 = repo.changelog.parents(rev)
774 774 if p2 != nullid and p2 in [x.node for x in self.applied]:
775 775 return p2
776 776 return p1
777 777
778 778 def mergepatch(self, repo, mergeq, series, diffopts):
779 779 if not self.applied:
780 780 # each of the patches merged in will have two parents. This
781 781 # can confuse the qrefresh, qdiff, and strip code because it
782 782 # needs to know which parent is actually in the patch queue.
783 783 # so, we insert a merge marker with only one parent. This way
784 784 # the first patch in the queue is never a merge patch
785 785 #
786 786 pname = ".hg.patches.merge.marker"
787 787 n = newcommit(repo, None, '[mq]: merge marker', force=True)
788 788 self.removeundo(repo)
789 789 self.applied.append(statusentry(n, pname))
790 790 self.applieddirty = True
791 791
792 792 head = self.qparents(repo)
793 793
794 794 for patch in series:
795 795 patch = mergeq.lookup(patch, strict=True)
796 796 if not patch:
797 797 self.ui.warn(_("patch %s does not exist\n") % patch)
798 798 return (1, None)
799 799 pushable, reason = self.pushable(patch)
800 800 if not pushable:
801 801 self.explainpushable(patch, all_patches=True)
802 802 continue
803 803 info = mergeq.isapplied(patch)
804 804 if not info:
805 805 self.ui.warn(_("patch %s is not applied\n") % patch)
806 806 return (1, None)
807 807 rev = info[1]
808 808 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
809 809 if head:
810 810 self.applied.append(statusentry(head, patch))
811 811 self.applieddirty = True
812 812 if err:
813 813 return (err, head)
814 814 self.savedirty()
815 815 return (0, head)
816 816
817 817 def patch(self, repo, patchfile):
818 818 '''Apply patchfile to the working directory.
819 819 patchfile: name of patch file'''
820 820 files = set()
821 821 try:
822 822 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
823 823 files=files, eolmode=None)
824 824 return (True, list(files), fuzz)
825 825 except Exception as inst:
826 826 self.ui.note(str(inst) + '\n')
827 827 if not self.ui.verbose:
828 828 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
829 829 self.ui.traceback()
830 830 return (False, list(files), False)
831 831
832 832 def apply(self, repo, series, list=False, update_status=True,
833 833 strict=False, patchdir=None, merge=None, all_files=None,
834 834 tobackup=None, keepchanges=False):
835 835 wlock = lock = tr = None
836 836 try:
837 837 wlock = repo.wlock()
838 838 lock = repo.lock()
839 839 tr = repo.transaction("qpush")
840 840 try:
841 841 ret = self._apply(repo, series, list, update_status,
842 842 strict, patchdir, merge, all_files=all_files,
843 843 tobackup=tobackup, keepchanges=keepchanges)
844 844 tr.close()
845 845 self.savedirty()
846 846 return ret
847 847 except AbortNoCleanup:
848 848 tr.close()
849 849 self.savedirty()
850 850 raise
851 851 except: # re-raises
852 852 try:
853 853 tr.abort()
854 854 finally:
855 855 self.invalidate()
856 856 raise
857 857 finally:
858 858 release(tr, lock, wlock)
859 859 self.removeundo(repo)
860 860
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
               tobackup=None, keepchanges=False):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.

        Applies each patch in *series* in order, committing one
        changeset per patch.  Stops at the first failure; the returned
        hash is the node of the last commit made (or None if none).
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None  # node of the most recent commit, returned to caller
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                # guarded patch: explain and move on to the next one
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            # ph.message is a list of lines; join it into the commit text
            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    # back up only the to-be-touched files we were asked about
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _("conflicting local changes found"),
                            hint=_("did you forget to qrefresh?"))
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                # header-only patch (no diff hunks): commit message only
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                repo.dirstate.beginparentchange()
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.setparents(p1, merge)
                repo.dirstate.endparentchange()

            if all_files and '.hgsubstate' in all_files:
                # the patch touched subrepo state: merge subrepos too
                wctx = repo[None]
                pctx = repo['.']
                overwrite = False
                mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
                                                  overwrite)
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo['tip']
            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                          force=True)
            if repo['tip'] == oldtip:
                raise error.Abort(_("qpush exactly duplicates child changeset"))
            if n is None:
                raise error.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working "
                               "directory\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
967 967
    def _cleanup(self, patches, numrevs, keep=False):
        """Remove *patches* from the series and patch directory.

        The first *numrevs* entries of self.applied are treated as
        finished and dropped from the applied stack.  Unless *keep*,
        the patch files themselves are deleted (and forgotten from the
        versioned patch repository, if any).  Returns the nodes of the
        finished applied entries.
        """
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except OSError as inst:
                    # a missing patch file is fine; anything else is not
                    if inst.errno != errno.ENOENT:
                        raise

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        # delete from the back so earlier indexes stay valid
        for (i, p) in sorted([(self.findseries(p), p) for p in patches],
                             reverse=True):
            if i is not None:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                # patches referenced by finished revisions but absent from
                # the series file: warn rather than abort
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise error.Abort(''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]
1008 1008
1009 1009 def _revpatches(self, repo, revs):
1010 1010 firstrev = repo[self.applied[0].node].rev()
1011 1011 patches = []
1012 1012 for i, rev in enumerate(revs):
1013 1013
1014 1014 if rev < firstrev:
1015 1015 raise error.Abort(_('revision %d is not managed') % rev)
1016 1016
1017 1017 ctx = repo[rev]
1018 1018 base = self.applied[i].node
1019 1019 if ctx.node() != base:
1020 1020 msg = _('cannot delete revision %d above applied patches')
1021 1021 raise error.Abort(msg % rev)
1022 1022
1023 1023 patch = self.applied[i].name
1024 1024 for fmt in ('[mq]: %s', 'imported patch %s'):
1025 1025 if ctx.description() == fmt % patch:
1026 1026 msg = _('patch %s finalized without changeset message\n')
1027 1027 repo.ui.status(msg % patch)
1028 1028 break
1029 1029
1030 1030 patches.append(patch)
1031 1031 return patches
1032 1032
    def finish(self, repo, revs):
        """Move applied revisions *revs* out of mq control (qfinish)."""
        # Manually trigger phase computation to ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool('mq', 'secret', False):
            # only use this logic when the secret option is added
            oldqbase = repo[qfinished[0]]
            tphase = repo.ui.config('phases', 'new-commit', phases.draft)
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                # advance finished revisions to the new-commit phase
                tr = repo.transaction('qfinish')
                try:
                    phases.advanceboundary(repo, tr, tphase, qfinished)
                    tr.close()
                finally:
                    tr.release()
1050 1050
1051 1051 def delete(self, repo, patches, opts):
1052 1052 if not patches and not opts.get('rev'):
1053 1053 raise error.Abort(_('qdelete requires at least one revision or '
1054 1054 'patch name'))
1055 1055
1056 1056 realpatches = []
1057 1057 for patch in patches:
1058 1058 patch = self.lookup(patch, strict=True)
1059 1059 info = self.isapplied(patch)
1060 1060 if info:
1061 1061 raise error.Abort(_("cannot delete applied patch %s") % patch)
1062 1062 if patch not in self.series:
1063 1063 raise error.Abort(_("patch %s not in series file") % patch)
1064 1064 if patch not in realpatches:
1065 1065 realpatches.append(patch)
1066 1066
1067 1067 numrevs = 0
1068 1068 if opts.get('rev'):
1069 1069 if not self.applied:
1070 1070 raise error.Abort(_('no patches applied'))
1071 1071 revs = scmutil.revrange(repo, opts.get('rev'))
1072 1072 revs.sort()
1073 1073 revpatches = self._revpatches(repo, revs)
1074 1074 realpatches += revpatches
1075 1075 numrevs = len(revpatches)
1076 1076
1077 1077 self._cleanup(realpatches, numrevs, opts.get('keep'))
1078 1078
1079 1079 def checktoppatch(self, repo):
1080 1080 '''check that working directory is at qtip'''
1081 1081 if self.applied:
1082 1082 top = self.applied[-1].node
1083 1083 patch = self.applied[-1].name
1084 1084 if repo.dirstate.p1() != top:
1085 1085 raise error.Abort(_("working directory revision is not qtip"))
1086 1086 return top, patch
1087 1087 return None, None
1088 1088
1089 1089 def putsubstate2changes(self, substatestate, changes):
1090 1090 for files in changes[:3]:
1091 1091 if '.hgsubstate' in files:
1092 1092 return # already listed up
1093 1093 # not yet listed up
1094 1094 if substatestate in 'a?':
1095 1095 changes[1].append('.hgsubstate')
1096 1096 elif substatestate in 'r':
1097 1097 changes[2].append('.hgsubstate')
1098 1098 else: # modified
1099 1099 changes[0].append('.hgsubstate')
1100 1100
1101 1101 def checklocalchanges(self, repo, force=False, refresh=True):
1102 1102 excsuffix = ''
1103 1103 if refresh:
1104 1104 excsuffix = ', qrefresh first'
1105 1105 # plain versions for i18n tool to detect them
1106 1106 _("local changes found, qrefresh first")
1107 1107 _("local changed subrepos found, qrefresh first")
1108 1108 return checklocalchanges(repo, force, excsuffix)
1109 1109
1110 1110 _reserved = ('series', 'status', 'guards', '.', '..')
1111 1111 def checkreservedname(self, name):
1112 1112 if name in self._reserved:
1113 1113 raise error.Abort(_('"%s" cannot be used as the name of a patch')
1114 1114 % name)
1115 1115 for prefix in ('.hg', '.mq'):
1116 1116 if name.startswith(prefix):
1117 1117 raise error.Abort(_('patch name cannot begin with "%s"')
1118 1118 % prefix)
1119 1119 for c in ('#', ':', '\r', '\n'):
1120 1120 if c in name:
1121 1121 raise error.Abort(_('%r cannot be used in the name of a patch')
1122 1122 % c)
1123 1123
1124 1124 def checkpatchname(self, name, force=False):
1125 1125 self.checkreservedname(name)
1126 1126 if not force and os.path.exists(self.join(name)):
1127 1127 if os.path.isdir(self.join(name)):
1128 1128 raise error.Abort(_('"%s" already exists as a directory')
1129 1129 % name)
1130 1130 else:
1131 1131 raise error.Abort(_('patch "%s" already exists') % name)
1132 1132
1133 1133 def checkkeepchanges(self, keepchanges, force):
1134 1134 if force and keepchanges:
1135 1135 raise error.Abort(_('cannot use both --force and --keep-changes'))
1136 1136
    def new(self, repo, patchfn, *pats, **opts):
        """Create a new patch *patchfn* from local changes (qnew).

        options:
        msg: a string or a no-argument function returning a string

        Commits the selected changes, records the new entry in the
        series and applied stacks, and writes the patch file.  On any
        failure the commit is rolled back and the patch file removed.
        """
        msg = opts.get('msg')
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qnew')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        if opts.get('checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = checksubstate(repo)
        if inclsubs:
            substatestate = repo.dirstate['.hgsubstate']
        if opts.get('include') or opts.get('exclude') or pats:
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise error.Abort('%s: %s' % (f, msg))
            match = scmutil.match(repo[None], pats, opts, badfn=badfn)
            changes = repo.status(match=match)
        else:
            changes = self.checklocalchanges(repo, force=True)
        # modified + added + removed files, plus dirty subrepo state
        commitfiles = list(inclsubs)
        for files in changes[:3]:
            commitfiles.extend(files)
        match = scmutil.matchfiles(repo, commitfiles)
        if len(repo[None].parents()) > 1:
            raise error.Abort(_('cannot manage merge changesets'))
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        wlock = repo.wlock()
        try:
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError as e:
                raise error.Abort(_('cannot write patch "%s": %s')
                                  % (patchfn, e.strerror))
            try:
                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        if desc.rstrip():
                            return desc
                        else:
                            return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    commitmsg = msg
                else:
                    commitmsg = msg or defaultmsg

                n = newcommit(repo, None, commitmsg, user, date, match=match,
                              force=True, editor=editor)
                if n is None:
                    raise error.Abort(_("repo commit failed"))
                try:
                    # record the new patch in series/applied bookkeeping
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    nctx = repo[n]
                    ph = patchheader(self.join(patchfn), self.plainmode)
                    if user:
                        ph.setuser(user)
                    if date:
                        ph.setdate('%s %s' % date)
                    ph.setparent(hex(nctx.p1().node()))
                    msg = nctx.description().strip()
                    if msg == defaultmsg.strip():
                        # don't store the auto-generated placeholder message
                        msg = ''
                    ph.setmessage(msg)
                    p.write(str(ph))
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(repo, node1=parent, node2=n,
                                               changes=changes, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except: # re-raises
                    repo.rollback()
                    raise
            except Exception:
                # undo the partially-written patch file before re-raising
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)
1244 1244
1245 1245 def isapplied(self, patch):
1246 1246 """returns (index, rev, patch)"""
1247 1247 for i, a in enumerate(self.applied):
1248 1248 if a.name == patch:
1249 1249 return (i, a.node, a.name)
1250 1250 return None
1251 1251
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve *patch* to an entry of the series file, or abort.

        Exact names win; otherwise the fallbacks documented above are
        tried (only the numeric one when *strict*).
        """
        def partialname(s):
            # resolve a substring, or the symbolic names qtip/qbase
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                # ambiguous substring: report candidates, resolve nothing
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            # 1) numeric offset into the series (negative allowed)
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                # 2) unique substring
                res = partialname(patch)
                if res:
                    return res
                # 3a) name-N: N patches before the named one
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partialname(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                # 3b) name+N: N patches after the named one
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partialname(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise error.Abort(_("patch %s not in series") % patch)
1318 1318
    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
             all=False, move=False, exact=False, nobackup=False,
             keepchanges=False):
        """Apply the next patch(es) onto the working directory (qpush).

        Pushes up to *patch* (or one patch, or everything with *all*).
        Returns 0 on success/no-op, 1 on error conditions reported to
        the user; otherwise the error code from apply/mergepatch.
        """
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        wlock = repo.wlock()
        try:
            heads = []
            for hs in repo.branchmap().itervalues():
                heads.extend(hs)
            if not heads:
                heads = [nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise error.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                else:
                    if reason:
                        reason = _('guarded by %s') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                # --exact: update to the patch's recorded parent first,
                # and reject option combinations that conflict with that
                if keepchanges:
                    raise error.Abort(
                        _("cannot use --exact and --keep-changes together"))
                if move:
                    raise error.Abort(_('cannot use --exact and --move '
                                       'together'))
                if self.applied:
                    raise error.Abort(_('cannot push --exact with applied '
                                       'patches'))
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise error.Abort(
                        _("%s does not have a parent recorded") % root)
                if not repo[target] == repo['.']:
                    hg.update(repo, target)

            if move:
                # --move: reorder fullseries so *patch* comes next
                if not patch:
                    raise error.Abort(_("please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(status.modified + status.added +
                                    status.removed + status.deleted)
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files,
                                     tobackup=tobackup, keepchanges=keepchanges)
            except AbortNoCleanup:
                raise
            except: # re-raises
                self.ui.warn(_('cleaning up working directory...\n'))
                cmdutil.revert(self.ui, repo, repo['.'],
                               repo.dirstate.parents(), no_backup=True)
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
1469 1469
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            nobackup=False, keepchanges=False):
        """Unapply patches down to (and excluding) *patch* (qpop).

        With *all*, unapply everything; with no argument, unapply the
        topmost patch.  *update* controls whether the working directory
        is rewound; it may be forced on/off depending on which parents
        the popped revisions are.
        """
        self.checkkeepchanges(keepchanges, force)
        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # still force a dirstate update if a parent is being popped
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the update if none of the popped revisions is a parent
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(s.modified + s.added +
                                    s.removed + s.deleted)

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_('trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise error.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _("popping would remove a public revision"),
                    hint=_('see "hg help phases" for details'))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, '.')[:4]
                if d:
                    raise error.Abort(_("deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_("local changes found, qrefresh first"))
                self.backup(repo, tobackup)
                repo.dirstate.beginparentchange()
                for f in a:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                    repo.dirstate.drop(f)
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.setparents(qp, nullid)
                repo.dirstate.endparentchange()
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
            # refresh subrepos to match the new working directory parent
            for s, state in repo['.'].substate.items():
                repo['.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1580 1580
1581 1581 def diff(self, repo, pats, opts):
1582 1582 top, patch = self.checktoppatch(repo)
1583 1583 if not top:
1584 1584 self.ui.write(_("no patches applied\n"))
1585 1585 return
1586 1586 qp = self.qparents(repo, top)
1587 1587 if opts.get('reverse'):
1588 1588 node1, node2 = None, qp
1589 1589 else:
1590 1590 node1, node2 = qp, None
1591 1591 diffopts = self.diffopts(opts, patch)
1592 1592 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1593 1593
    def refresh(self, repo, pats=None, **opts):
        """Fold local changes into the topmost applied patch (qrefresh).

        Strips the qtip changeset, rebuilds the dirstate to include the
        local changes, recommits, and rewrites the patch file.  Returns
        1 when no patch is applied.
        """
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qrefresh')
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(_("cannot qrefresh a revision with children"))
            if not repo[top].mutable():
                raise error.Abort(_("cannot qrefresh public revision"),
                                  hint=_('see "hg help phases" for details'))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, hex(patchparent))
            if inclsubs:
                substatestate = repo.dirstate['.hgsubstate']

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            changes = repo.changelog.read(top)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            matchfn = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with include/exclude options
                matchfn = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply matchfn via repo.status to ensure correct case handling.
            cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            # bookmarks on qtip must be moved onto the recommitted node
            bmlist = repo[top].bookmarks()

            dsguard = None
            try:
                dsguard = cmdutil.dirstateguard(repo, 'mq.refresh')
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m) - 1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                user = ph.user or changes[1]

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = True
                strip(self.ui, repo, [top], update=False, backup=False)
                dsguard.close()
            finally:
                release(dsguard)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    message = msg or "\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)

                # Ensure we create a new changeset in the same phase than
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction('mq')
                    n = newcommit(repo, oldphase, message, user, ph.date,
                                  match=match, force=True, editor=editor)
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(repo, patchparent,
                                           changes=c, opts=diffopts)
                    comments = str(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    marks = repo._bookmarks
                    for bm in bmlist:
                        marks[bm] = n
                    marks.recordchange(tr)
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(lock, tr)
            except: # re-raises
                # the old qtip is already stripped: rebuild the dirstate
                # so the user can recover with revert --all + qpush
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(_('qrefresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1834 1834
1835 1835 def init(self, repo, create=False):
1836 1836 if not create and os.path.isdir(self.path):
1837 1837 raise error.Abort(_("patch queue directory already exists"))
1838 1838 try:
1839 1839 os.mkdir(self.path)
1840 1840 except OSError as inst:
1841 1841 if inst.errno != errno.EEXIST or not create:
1842 1842 raise
1843 1843 if create:
1844 1844 return self.qrepo(create=True)
1845 1845
1846 1846 def unapplied(self, repo, patch=None):
1847 1847 if patch and patch not in self.series:
1848 1848 raise error.Abort(_("patch %s is not in series file") % patch)
1849 1849 if not patch:
1850 1850 start = self.seriesend()
1851 1851 else:
1852 1852 start = self.series.index(patch) + 1
1853 1853 unapplied = []
1854 1854 for i in xrange(start, len(self.series)):
1855 1855 pushable, reason = self.pushable(i)
1856 1856 if pushable:
1857 1857 unapplied.append((i, self.series[i]))
1858 1858 self.explainpushable(i)
1859 1859 return unapplied
1860 1860
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print (a slice of) the series file, one patch per line.

        With missing set, instead list files found in the patch
        directory that mq does not know about.  start/length select the
        slice of the series to show; status ('A', 'U' or 'G') filters
        non-verbose output to patches in that state; summary appends
        the first line of each patch's header message.
        """
        def displayname(pfx, patchname, state):
            # write one output line: optional prefix, the patch name
            # (labelled 'qseries.<state>' for color) and optional summary
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = ''

                if self.ui.formatted():
                    # truncate the summary so the line fits the terminal
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        # names of the currently applied patches
        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                # width needed to right-align the series indexes
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    # non-verbose with a status filter: skip other states
                    continue
                displayname(pfx, patch, state)
        else:
            # scan the patch directory for files mq does not track
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.statuspath, self.seriespath,
                                   self.guardspath)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                # old-style conditional expression: 'D ' prefix if verbose
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')
1920 1920
1921 1921 def issaveline(self, l):
1922 1922 if l.name == '.hg.patches.save.line':
1923 1923 return True
1924 1924
1925 1925 def qrepo(self, create=False):
1926 1926 ui = self.baseui.copy()
1927 1927 if create or os.path.isdir(self.join(".hg")):
1928 1928 return hg.repository(ui, path=self.path, create=create)
1929 1929
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Reinstate mq state from a 'qsave' changeset (see save()).

        Parses the applied list, the full series and the optional
        queue-repo dirstate parents back out of the description of
        changeset 'rev'.  With delete, strip the save changeset itself
        (only when it is a head); with qupdate, update the queue
        repository to the recorded parent.  Returns 1 on error.
        """
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                # everything after this marker encodes the queue state
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                # two hex nodes: the queue repository's dirstate parents
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                # 'node:name' lines are applied patches; ':name' lines
                # are the remaining (unapplied) series entries
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_("no saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            # only strip the save changeset if nothing was committed on top
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
1983 1983
    def save(self, repo, msg=None):
        """Checkpoint the current mq state in a commit ('qsave').

        Encodes the applied-patch list, the full series and (if a
        versioned queue repository exists) its dirstate parents into
        the commit message, then records the checkpoint with a
        synthetic '.hg.patches.save.line' status entry so restore()
        can recognize it.  Returns 1 on error.
        """
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            # remember the queue repository's dirstate parents as well
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        # marker entry restore() uses to find the save changeset
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)
2010 2010
2011 2011 def fullseriesend(self):
2012 2012 if self.applied:
2013 2013 p = self.applied[-1].name
2014 2014 end = self.findseries(p)
2015 2015 if end is None:
2016 2016 return len(self.fullseries)
2017 2017 return end + 1
2018 2018 return 0
2019 2019
2020 2020 def seriesend(self, all_patches=False):
2021 2021 """If all_patches is False, return the index of the next pushable patch
2022 2022 in the series, or the series length. If all_patches is True, return the
2023 2023 index of the first patch past the last applied one.
2024 2024 """
2025 2025 end = 0
2026 2026 def nextpatch(start):
2027 2027 if all_patches or start >= len(self.series):
2028 2028 return start
2029 2029 for i in xrange(start, len(self.series)):
2030 2030 p, reason = self.pushable(i)
2031 2031 if p:
2032 2032 return i
2033 2033 self.explainpushable(i)
2034 2034 return len(self.series)
2035 2035 if self.applied:
2036 2036 p = self.applied[-1].name
2037 2037 try:
2038 2038 end = self.series.index(p)
2039 2039 except ValueError:
2040 2040 return 0
2041 2041 return nextpatch(end + 1)
2042 2042 return nextpatch(end)
2043 2043
2044 2044 def appliedname(self, index):
2045 2045 pname = self.applied[index].name
2046 2046 if not self.ui.verbose:
2047 2047 p = pname
2048 2048 else:
2049 2049 p = str(self.series.index(pname)) + " " + pname
2050 2050 return p
2051 2051
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patch files and/or existing revisions into the queue.

        files are copied (or, with existing, adopted in place from the
        patch directory) and inserted into the series after the last
        applied patch.  rev instead places already-committed revisions
        under mq control; they must be mutable, non-merge and form a
        linear path (to qbase when patches are applied, to a head
        otherwise).  Returns the list of imported patch names.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(_('patch %s is already in the series file')
                                  % patchname)

        if rev:
            if files:
                raise error.Abort(_('option "-r" not valid when importing '
                                    'files'))
            rev = scmutil.revrange(repo, rev)
            # import revisions top-down so each becomes the new qbase
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_('no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(_('option "-n" not valid when importing multiple '
                                'patches'))
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(_('revision %d is the root of more than one '
                                    'branch') % rev.last())
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(_('revision %d is already managed')
                                      % rev.first())
                if heads != [self.applied[-1].node]:
                    raise error.Abort(_('revision %d is not the parent of '
                                        'the queue') % rev.first())
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(_('revision %d has unmanaged children')
                                      % rev.first())
                lastparent = None

            diffopts = self.diffopts({'git': git})
            tr = repo.transaction('qimport')
            try:
                for r in rev:
                    if not repo[r].mutable():
                        raise error.Abort(_('revision %d is not mutable') % r,
                                          hint=_('see "hg help phases" '
                                                 'for details'))
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    if p2 != nullrev:
                        raise error.Abort(_('cannot import merge revision %d')
                                          % r)
                    # enforce linearity: each revision must be the first
                    # parent of the previously imported one
                    if lastparent and lastparent != r:
                        raise error.Abort(_('revision %d is not the parent of '
                                            '%d')
                                          % (r, lastparent))
                    lastparent = p1

                    if not patchname:
                        patchname = makepatchname(self.fullseries,
                            repo[r].description().split('\n', 1)[0],
                            '%d.diff' % r)
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    # materialize the revision as a patch file
                    patchf = self.opener(patchname, "w")
                    cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                    patchf.close()

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                    if rev and repo.ui.configbool('mq', 'secret', False):
                        # if we added anything with --rev, move the secret root
                        phases.retractboundary(repo, tr, phases.secret, [n])
                self.parseseries()
                self.applieddirty = True
                self.seriesdirty = True
                tr.close()
            finally:
                tr.release()

        for i, filename in enumerate(files):
            if existing:
                if filename == '-':
                    raise error.Abort(_('-e is incompatible with import from -')
                                      )
                filename = normname(filename)
                self.checkreservedname(filename)
                if util.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(_('renaming %s to %s\n')
                                        % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                if filename == '-' and not patchname:
                    raise error.Abort(_('need --name to import a patch from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_("unable to read file %s") % filename)
                # copy the patch text into the patch directory
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # insert after the last applied patch
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported
2196 2196
def fixkeepchangesopts(ui, opts):
    """Fold the mq.keepchanges config setting into command options.

    When mq.keepchanges is enabled and neither --force nor --exact was
    given, return a copy of opts with keep_changes switched on;
    otherwise return opts untouched.
    """
    keep = ui.configbool('mq', 'keepchanges')
    if not keep or opts.get('force') or opts.get('exact'):
        return opts
    newopts = dict(opts)
    newopts['keep_changes'] = True
    return newopts
2204 2204
2205 2205 @command("qdelete|qremove|qrm",
2206 2206 [('k', 'keep', None, _('keep patch file')),
2207 2207 ('r', 'rev', [],
2208 2208 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2209 2209 _('hg qdelete [-k] [PATCH]...'))
2210 2210 def delete(ui, repo, *patches, **opts):
2211 2211 """remove patches from queue
2212 2212
2213 2213 The patches must not be applied, and at least one patch is required. Exact
2214 2214 patch identifiers must be given. With -k/--keep, the patch files are
2215 2215 preserved in the patch directory.
2216 2216
2217 2217 To stop managing a patch and move it into permanent history,
2218 2218 use the :hg:`qfinish` command."""
2219 2219 q = repo.mq
2220 2220 q.delete(repo, patches, opts)
2221 2221 q.savedirty()
2222 2222 return 0
2223 2223
2224 2224 @command("qapplied",
2225 2225 [('1', 'last', None, _('show only the preceding applied patch'))
2226 2226 ] + seriesopts,
2227 2227 _('hg qapplied [-1] [-s] [PATCH]'))
2228 2228 def applied(ui, repo, patch=None, **opts):
2229 2229 """print the patches already applied
2230 2230
2231 2231 Returns 0 on success."""
2232 2232
2233 2233 q = repo.mq
2234 2234
2235 2235 if patch:
2236 2236 if patch not in q.series:
2237 2237 raise error.Abort(_("patch %s is not in series file") % patch)
2238 2238 end = q.series.index(patch) + 1
2239 2239 else:
2240 2240 end = q.seriesend(True)
2241 2241
2242 2242 if opts.get('last') and not end:
2243 2243 ui.write(_("no patches applied\n"))
2244 2244 return 1
2245 2245 elif opts.get('last') and end == 1:
2246 2246 ui.write(_("only one patch applied\n"))
2247 2247 return 1
2248 2248 elif opts.get('last'):
2249 2249 start = end - 2
2250 2250 end = 1
2251 2251 else:
2252 2252 start = 0
2253 2253
2254 2254 q.qseries(repo, length=end, start=start, status='A',
2255 2255 summary=opts.get('summary'))
2256 2256
2257 2257
2258 2258 @command("qunapplied",
2259 2259 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2260 2260 _('hg qunapplied [-1] [-s] [PATCH]'))
2261 2261 def unapplied(ui, repo, patch=None, **opts):
2262 2262 """print the patches not yet applied
2263 2263
2264 2264 Returns 0 on success."""
2265 2265
2266 2266 q = repo.mq
2267 2267 if patch:
2268 2268 if patch not in q.series:
2269 2269 raise error.Abort(_("patch %s is not in series file") % patch)
2270 2270 start = q.series.index(patch) + 1
2271 2271 else:
2272 2272 start = q.seriesend(True)
2273 2273
2274 2274 if start == len(q.series) and opts.get('first'):
2275 2275 ui.write(_("all patches applied\n"))
2276 2276 return 1
2277 2277
2278 2278 if opts.get('first'):
2279 2279 length = 1
2280 2280 else:
2281 2281 length = None
2282 2282 q.qseries(repo, start=start, length=length, status='U',
2283 2283 summary=opts.get('summary'))
2284 2284
2285 2285 @command("qimport",
2286 2286 [('e', 'existing', None, _('import file in patch directory')),
2287 2287 ('n', 'name', '',
2288 2288 _('name of patch file'), _('NAME')),
2289 2289 ('f', 'force', None, _('overwrite existing files')),
2290 2290 ('r', 'rev', [],
2291 2291 _('place existing revisions under mq control'), _('REV')),
2292 2292 ('g', 'git', None, _('use git extended diff format')),
2293 2293 ('P', 'push', None, _('qpush after importing'))],
2294 2294 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
2295 2295 def qimport(ui, repo, *filename, **opts):
2296 2296 """import a patch or existing changeset
2297 2297
2298 2298 The patch is inserted into the series after the last applied
2299 2299 patch. If no patches have been applied, qimport prepends the patch
2300 2300 to the series.
2301 2301
2302 2302 The patch will have the same name as its source file unless you
2303 2303 give it a new one with -n/--name.
2304 2304
2305 2305 You can register an existing patch inside the patch directory with
2306 2306 the -e/--existing flag.
2307 2307
2308 2308 With -f/--force, an existing patch of the same name will be
2309 2309 overwritten.
2310 2310
2311 2311 An existing changeset may be placed under mq control with -r/--rev
2312 2312 (e.g. qimport --rev . -n patch will place the current revision
2313 2313 under mq control). With -g/--git, patches imported with --rev will
2314 2314 use the git diff format. See the diffs help topic for information
2315 2315 on why this is important for preserving rename/copy information
2316 2316 and permission changes. Use :hg:`qfinish` to remove changesets
2317 2317 from mq control.
2318 2318
2319 2319 To import a patch from standard input, pass - as the patch file.
2320 2320 When importing from standard input, a patch name must be specified
2321 2321 using the --name flag.
2322 2322
2323 2323 To import an existing patch while renaming it::
2324 2324
2325 2325 hg qimport -e existing-patch -n new-name
2326 2326
2327 2327 Returns 0 if import succeeded.
2328 2328 """
2329 2329 lock = repo.lock() # cause this may move phase
2330 2330 try:
2331 2331 q = repo.mq
2332 2332 try:
2333 2333 imported = q.qimport(
2334 2334 repo, filename, patchname=opts.get('name'),
2335 2335 existing=opts.get('existing'), force=opts.get('force'),
2336 2336 rev=opts.get('rev'), git=opts.get('git'))
2337 2337 finally:
2338 2338 q.savedirty()
2339 2339 finally:
2340 2340 lock.release()
2341 2341
2342 2342 if imported and opts.get('push') and not opts.get('rev'):
2343 2343 return q.push(repo, imported[-1])
2344 2344 return 0
2345 2345
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if not r:
        return 0
    # seed the nested queue repository with .hgignore and series files
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wvfs('.hgignore', 'w')
        fp.write('^\\.hg\n'
                 '^\\.mq\n'
                 'syntax: glob\n'
                 'status\n'
                 'guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wvfs('series', 'w').close()
    r[None].add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
2371 2371
2372 2372 @command("^qinit",
2373 2373 [('c', 'create-repo', None, _('create queue repository'))],
2374 2374 _('hg qinit [-c]'))
2375 2375 def init(ui, repo, **opts):
2376 2376 """init a new queue repository (DEPRECATED)
2377 2377
2378 2378 The queue repository is unversioned by default. If
2379 2379 -c/--create-repo is specified, qinit will create a separate nested
2380 2380 repository for patches (qinit -c may also be run later to convert
2381 2381 an unversioned patch repository into a versioned one). You can use
2382 2382 qcommit to commit changes to this queue repository.
2383 2383
2384 2384 This command is deprecated. Without -c, it's implied by other relevant
2385 2385 commands. With -c, use :hg:`init --mq` instead."""
2386 2386 return qinit(ui, repo, create=opts.get('create_repo'))
2387 2387
2388 2388 @command("qclone",
2389 2389 [('', 'pull', None, _('use pull protocol to copy metadata')),
2390 2390 ('U', 'noupdate', None,
2391 2391 _('do not update the new working directories')),
2392 2392 ('', 'uncompressed', None,
2393 2393 _('use uncompressed transfer (fast over LAN)')),
2394 2394 ('p', 'patches', '',
2395 2395 _('location of source patch repository'), _('REPO')),
2396 2396 ] + commands.remoteopts,
2397 2397 _('hg qclone [OPTION]... SOURCE [DEST]'),
2398 2398 norepo=True)
2399 2399 def clone(ui, source, dest=None, **opts):
2400 2400 '''clone main and patch repository at same time
2401 2401
2402 2402 If source is local, destination will have no patches applied. If
2403 2403 source is remote, this command can not check if patches are
2404 2404 applied in source, so cannot guarantee that patches are not
2405 2405 applied in destination. If you clone remote repository, be sure
2406 2406 before that it has no patches applied.
2407 2407
2408 2408 Source patch repository is looked for in <src>/.hg/patches by
2409 2409 default. Use -p <url> to change.
2410 2410
2411 2411 The patch directory must be a nested Mercurial repository, as
2412 2412 would be created by :hg:`init --mq`.
2413 2413
2414 2414 Return 0 on success.
2415 2415 '''
2416 2416 def patchdir(repo):
2417 2417 """compute a patch repo url from a repo object"""
2418 2418 url = repo.url()
2419 2419 if url.endswith('/'):
2420 2420 url = url[:-1]
2421 2421 return url + '/.hg/patches'
2422 2422
2423 2423 # main repo (destination and sources)
2424 2424 if dest is None:
2425 2425 dest = hg.defaultdest(source)
2426 2426 sr = hg.peer(ui, opts, ui.expandpath(source))
2427 2427
2428 2428 # patches repo (source only)
2429 2429 if opts.get('patches'):
2430 2430 patchespath = ui.expandpath(opts.get('patches'))
2431 2431 else:
2432 2432 patchespath = patchdir(sr)
2433 2433 try:
2434 2434 hg.peer(ui, opts, patchespath)
2435 2435 except error.RepoError:
2436 2436 raise error.Abort(_('versioned patch repository not found'
2437 2437 ' (see init --mq)'))
2438 2438 qbase, destrev = None, None
2439 2439 if sr.local():
2440 2440 repo = sr.local()
2441 2441 if repo.mq.applied and repo[qbase].phase() != phases.secret:
2442 2442 qbase = repo.mq.applied[0].node
2443 2443 if not hg.islocal(dest):
2444 2444 heads = set(repo.heads())
2445 2445 destrev = list(heads.difference(repo.heads(qbase)))
2446 2446 destrev.append(repo.changelog.parents(qbase)[0])
2447 2447 elif sr.capable('lookup'):
2448 2448 try:
2449 2449 qbase = sr.lookup('qbase')
2450 2450 except error.RepoError:
2451 2451 pass
2452 2452
2453 2453 ui.note(_('cloning main repository\n'))
2454 2454 sr, dr = hg.clone(ui, opts, sr.url(), dest,
2455 2455 pull=opts.get('pull'),
2456 2456 rev=destrev,
2457 2457 update=False,
2458 2458 stream=opts.get('uncompressed'))
2459 2459
2460 2460 ui.note(_('cloning patch repository\n'))
2461 2461 hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
2462 2462 pull=opts.get('pull'), update=not opts.get('noupdate'),
2463 2463 stream=opts.get('uncompressed'))
2464 2464
2465 2465 if dr.local():
2466 2466 repo = dr.local()
2467 2467 if qbase:
2468 2468 ui.note(_('stripping applied patches from destination '
2469 2469 'repository\n'))
2470 2470 strip(ui, repo, [qbase], update=False, backup=None)
2471 2471 if not opts.get('noupdate'):
2472 2472 ui.note(_('updating destination repository\n'))
2473 2473 hg.update(repo, repo.changelog.tip())
2474 2474
2475 2475 @command("qcommit|qci",
2476 2476 commands.table["^commit|ci"][1],
2477 2477 _('hg qcommit [OPTION]... [FILE]...'),
2478 2478 inferrepo=True)
2479 2479 def commit(ui, repo, *pats, **opts):
2480 2480 """commit changes in the queue repository (DEPRECATED)
2481 2481
2482 2482 This command is deprecated; use :hg:`commit --mq` instead."""
2483 2483 q = repo.mq
2484 2484 r = q.qrepo()
2485 2485 if not r:
2486 2486 raise error.Abort('no queue repository')
2487 2487 commands.commit(r.ui, r, *pats, **opts)
2488 2488
2489 2489 @command("qseries",
2490 2490 [('m', 'missing', None, _('print patches not in series')),
2491 2491 ] + seriesopts,
2492 2492 _('hg qseries [-ms]'))
2493 2493 def series(ui, repo, **opts):
2494 2494 """print the entire series file
2495 2495
2496 2496 Returns 0 on success."""
2497 2497 repo.mq.qseries(repo, missing=opts.get('missing'),
2498 2498 summary=opts.get('summary'))
2499 2499 return 0
2500 2500
2501 2501 @command("qtop", seriesopts, _('hg qtop [-s]'))
2502 2502 def top(ui, repo, **opts):
2503 2503 """print the name of the current patch
2504 2504
2505 2505 Returns 0 on success."""
2506 2506 q = repo.mq
2507 2507 if q.applied:
2508 2508 t = q.seriesend(True)
2509 2509 else:
2510 2510 t = 0
2511 2511
2512 2512 if t:
2513 2513 q.qseries(repo, start=t - 1, length=1, status='A',
2514 2514 summary=opts.get('summary'))
2515 2515 else:
2516 2516 ui.write(_("no patches applied\n"))
2517 2517 return 1
2518 2518
2519 2519 @command("qnext", seriesopts, _('hg qnext [-s]'))
2520 2520 def next(ui, repo, **opts):
2521 2521 """print the name of the next pushable patch
2522 2522
2523 2523 Returns 0 on success."""
2524 2524 q = repo.mq
2525 2525 end = q.seriesend()
2526 2526 if end == len(q.series):
2527 2527 ui.write(_("all patches applied\n"))
2528 2528 return 1
2529 2529 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2530 2530
2531 2531 @command("qprev", seriesopts, _('hg qprev [-s]'))
2532 2532 def prev(ui, repo, **opts):
2533 2533 """print the name of the preceding applied patch
2534 2534
2535 2535 Returns 0 on success."""
2536 2536 q = repo.mq
2537 2537 l = len(q.applied)
2538 2538 if l == 1:
2539 2539 ui.write(_("only one patch applied\n"))
2540 2540 return 1
2541 2541 if not l:
2542 2542 ui.write(_("no patches applied\n"))
2543 2543 return 1
2544 2544 idx = q.series.index(q.applied[-2].name)
2545 2545 q.qseries(repo, start=idx, length=1, status='A',
2546 2546 summary=opts.get('summary'))
2547 2547
def setupheaderopts(ui, opts):
    """Resolve --currentuser/--currentdate into concrete opt values.

    Mutates opts in place: fills in 'user' from the configured
    username and 'date' from the current time, but only when the
    corresponding explicit option was not already given.
    """
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2553 2553
2554 2554 @command("^qnew",
2555 2555 [('e', 'edit', None, _('invoke editor on commit messages')),
2556 2556 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2557 2557 ('g', 'git', None, _('use git extended diff format')),
2558 2558 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2559 2559 ('u', 'user', '',
2560 2560 _('add "From: <USER>" to patch'), _('USER')),
2561 2561 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2562 2562 ('d', 'date', '',
2563 2563 _('add "Date: <DATE>" to patch'), _('DATE'))
2564 2564 ] + commands.walkopts + commands.commitopts,
2565 2565 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
2566 2566 inferrepo=True)
2567 2567 def new(ui, repo, patch, *args, **opts):
2568 2568 """create a new patch
2569 2569
2570 2570 qnew creates a new patch on top of the currently-applied patch (if
2571 2571 any). The patch will be initialized with any outstanding changes
2572 2572 in the working directory. You may also use -I/--include,
2573 2573 -X/--exclude, and/or a list of files after the patch name to add
2574 2574 only changes to matching files to the new patch, leaving the rest
2575 2575 as uncommitted modifications.
2576 2576
2577 2577 -u/--user and -d/--date can be used to set the (given) user and
2578 2578 date, respectively. -U/--currentuser and -D/--currentdate set user
2579 2579 to current user and date to current date.
2580 2580
2581 2581 -e/--edit, -m/--message or -l/--logfile set the patch header as
2582 2582 well as the commit message. If none is specified, the header is
2583 2583 empty and the commit message is '[mq]: PATCH'.
2584 2584
2585 2585 Use the -g/--git option to keep the patch in the git extended diff
2586 2586 format. Read the diffs help topic for more information on why this
2587 2587 is important for preserving permission changes and copy/rename
2588 2588 information.
2589 2589
2590 2590 Returns 0 on successful creation of a new patch.
2591 2591 """
2592 2592 msg = cmdutil.logmessage(ui, opts)
2593 2593 q = repo.mq
2594 2594 opts['msg'] = msg
2595 2595 setupheaderopts(ui, opts)
2596 2596 q.new(repo, patch, *args, **opts)
2597 2597 q.savedirty()
2598 2598 return 0
2599 2599
@command("^qrefresh",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
         inferrepo=True)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    # translate -U/-D (current user/date) into concrete user/date opts
    setupheaderopts(ui, opts)
    # hold the wlock for the whole refresh: the patch file, series and
    # status are rewritten together and must stay consistent
    wlock = repo.wlock()
    try:
        ret = q.refresh(repo, pats, msg=message, **opts)
        q.savedirty()
        return ret
    finally:
        wlock.release()
2647 2647
@command("^qdiff",
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'),
         inferrepo=True)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # Read-only: the queue object writes the diff to ui itself, so the
    # only thing left to do here is report success.
    repo.mq.diff(repo, pats, opts)
    return 0
2669 2669
@command('qfold',
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    q = repo.mq
    if not files:
        raise error.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup('qtip')
    patches = []
    messages = []
    # Resolve and validate the list of patches to fold.
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('skipping already folded patch %s\n') % p)
            # BUGFIX: actually skip the patch.  Previously the loop fell
            # through after warning, so a duplicate name was appended (and
            # folded twice) and p == parent hit the "already applied" abort
            # right after claiming to skip it.
            continue
        if q.isapplied(p):
            raise error.Abort(_('qfold cannot fold already applied patch %s')
                             % p)
        patches.append(p)

    # Apply each patch on top of the current one, collecting headers.
    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_('error folding patch %s') % p)

    # No -m/-l message: concatenate the patch headers, separated by '* * *'.
    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append('* * *')
                message.extend(msg)
        message = '\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    # Refresh the current patch with the cumulative result and delete the
    # folded patch files (unless --keep); all under the wlock.
    wlock = repo.wlock()
    try:
        q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
                  editform='mq.qfold')
        q.delete(repo, patches, opts)
        q.savedirty()
    finally:
        wlock.release()
2739 2739
@command("qgoto",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    # normalize the --keep-changes/--force combination
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    # resolve name/number/prefix to a canonical patch name
    patch = q.lookup(patch)
    nobackup = opts.get('no_backup')
    keepchanges = opts.get('keep_changes')
    # already applied -> pop down to it; otherwise push up to it
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
                    keepchanges=keepchanges)
    else:
        ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
                     keepchanges=keepchanges)
    q.savedirty()
    return ret
2763 2763
@command("qguard",
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # print one series entry with its guards, colorized by state
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise error.Abort(_('cannot mix -l/--list with options or '
                               'arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # No explicit patch name (first arg starts with a guard prefix, or no
    # args at all): default to the topmost applied patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise error.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        # remaining args are the new guard list (-n clears them)
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # no guards given: just show the current ones for this patch
        status(q.series.index(q.lookup(patch)))
2838 2838
@command("qheader", [], _('hg qheader [PATCH]'))
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        # no argument: default to the currently applied top patch
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')
2856 2856
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of ``path``.

    Scans the directory containing ``path`` for entries matching
    "<base>.<number>" and returns the full path and integer index of
    the one with the largest number, or (None, None) when no saved
    queue exists.
    """
    dirname, base = os.path.split(path)
    pattern = re.compile("%s.([0-9]+)" % base)
    best_name = None
    best_index = None
    for entry in os.listdir(dirname):
        matched = pattern.match(entry)
        if not matched:
            continue
        idx = int(matched.group(1))
        # strictly greater: on equal indexes the first directory entry wins
        if best_index is None or idx > best_index:
            best_index = idx
            best_name = entry
    if best_name:
        return (os.path.join(dirname, best_name), best_index)
    return (None, None)
2873 2873
def savename(path):
    """Return the next unused save name for ``path``: "<path>.<index+1>"."""
    last, index = lastsavename(path)
    if last is None:
        # no previous save: start numbering at .1
        index = 0
    return "%s.%d" % (path, index + 1)
2880 2880
@command("^qpush",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = fixkeepchangesopts(ui, opts)
    if opts.get('merge'):
        # deprecated merge mode: pull patches in from a saved queue,
        # either named via -n or the most recent qsave
        if opts.get('name'):
            newpath = repo.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
                 keepchanges=opts.get('keep_changes'))
    return ret
2925 2925
@command("^qpop",
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('forget any local changes to patched files')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get('name'):
        # deprecated: operate on a named saved queue instead of the active
        # one; in that case the working directory is left untouched
        q = queue(ui, repo.baseui, repo.path, repo.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                keepchanges=opts.get('keep_changes'))
    q.savedirty()
    return ret
2962 2962
@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        # single-argument form: rename the current (top) patch
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # destination is a directory: keep the original basename inside it
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    # update the series file entry, preserving any '#guard' annotations
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    # if the patch is applied, update the status file entry too
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    # mirror the rename in the versioned patch queue repo, if any
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # not yet committed there: just swap the dirstate entries
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                # record as a copy so history follows the rename
                wctx.copy(patch, name)
                wctx.forget([patch])
        finally:
            wlock.release()

    q.savedirty()
3020 3020
@command("qrestore",
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV'))
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    # resolve the revision to a node; the queue object does the real work
    rev = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, rev, delete=opts.get('delete'),
              qupdate=opts.get('update'))
    q.savedirty()
    return 0
3035 3035
@command("qsave",
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty() # save to .hg/patches before copying
    if opts.get('copy'):
        # copy the patch directory, either to an explicit -n NAME or to
        # the next numbered save name ("patches.N")
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise error.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        # -e: clear the applied-patch status after saving
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
3073 3073
3074 3074
@command("qselect",
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...'))
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()
    # a patch index is "pushable" if the active guards allow it
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get('none'):
        # set (or with -n, clear) the active guard list, then report how
        # the pushable/guarded counts changed
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        # -s: tally how many series entries use each guard
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name without its leading '+'/'-' sign
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top so --reapply can push back to it
    reapply = opts.get('reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # pop down to just below the first applied patch that is now guarded
        for i in xrange(len(q.applied)):
            if not pushable(i):
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3183 3183
@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'))
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if not opts.get('applied') and not revrange:
        raise error.Abort(_('no revisions specified'))
    elif opts.get('applied'):
        # -a: extend the range to cover the whole applied stack
        revrange = ('qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases, but leaves the responsibility of
    # taking the repo lock to the caller to avoid deadlock with wlock.
    # This command code is responsible for that locking.
    lock = repo.lock()
    try:
        q.finish(repo, revs)
        q.savedirty()
    finally:
        lock.release()
    return 0
3228 3228
@command("qqueue",
         [('l', 'list', False, _('list all available queues')),
          ('', 'active', False, _('print name of active queue')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
         ],
         _('[OPTION] [QUEUE]'))
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    # bookkeeping files inside .hg: registered queue names and active queue
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        # active queue name, derived from the queue directory name
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        # True when the queue registry file does not exist yet
        try:
            fh = repo.vfs(_allqueues, 'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        # all registered queue names (always including the active one)
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, 'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        # refuse to switch away while patches are applied
        if q.applied:
            raise error.Abort(_('new queue created, but cannot make active '
                               'as patches are applied'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # an empty patches.queue file means the default "patches" queue
        fh = repo.vfs(_activequeue, 'w')
        if name != 'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        fh = repo.vfs(_allqueues, 'a')
        fh.write('%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        # default queue lives in .hg/patches, others in .hg/patches-<name>
        if name == 'patches':
            return repo.join('patches')
        else:
            return repo.join('patches-' + name)

    def _validname(name):
        for n in name:
            if n in ':\\/.':
                return False
        return True

    def _delete(name):
        if name not in existing:
            raise error.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_('cannot delete currently active queue'))

        # rewrite the registry without the deleted name, then rename the
        # new file into place
        fh = repo.vfs('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write('%s\n' % (queue,))
        fh.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))

    if not name or opts.get('list') or opts.get('active'):
        # listing modes: no queue name given, or -l/--active
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise error.Abort(
                _('invalid queue name, may not contain the characters ":\\/."'))

    existing = _getqueues()

    if opts.get('create'):
        if name in existing:
            raise error.Abort(_('queue "%s" already exists') % name)
        if _noqueues():
            _addqueue(_defaultqueue)
        _addqueue(name)
        _setactive(name)
    elif opts.get('rename'):
        current = _getcurrent()
        if name == current:
            raise error.Abort(_('can\'t rename "%s" to its current name')
                             % name)
        if name in existing:
            raise error.Abort(_('queue "%s" already exists') % name)

        olddir = _queuedir(current)
        newdir = _queuedir(name)

        if os.path.exists(newdir):
            raise error.Abort(_('non-queue directory "%s" already exists') %
                             newdir)

        # rewrite the registry with the new name and move the patch dir
        fh = repo.vfs('patches.queues.new', 'w')
        for queue in existing:
            if queue == current:
                fh.write('%s\n' % (name,))
                if os.path.exists(olddir):
                    util.rename(olddir, newdir)
            else:
                fh.write('%s\n' % (queue,))
        fh.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
        _setactivenocheck(name)
    elif opts.get('delete'):
        _delete(name)
    elif opts.get('purge'):
        # like --delete, but also remove the patch directory on disk
        if name in existing:
            _delete(name)
        qdir = _queuedir(name)
        if os.path.exists(qdir):
            shutil.rmtree(qdir)
    else:
        # bare queue name: switch to an existing queue
        if name not in existing:
            raise error.Abort(_('use --create to create a new queue'))
        _setactive(name)
3403 3403
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    if repo.mq.applied:
        # mq.secret decides whether applied patches default to secret
        # or draft phase
        if repo.ui.configbool('mq', 'secret', False):
            mqphase = phases.secret
        else:
            mqphase = phases.draft
        # the phase root is the first (bottom-most) applied patch
        roots[mqphase].add(repo[repo.mq.applied[0].node].node())
    return roots
3414 3414
def reposetup(ui, repo):
    # Wrap local repositories with mq-aware behavior: a lazy 'mq' queue
    # attribute, commit/push guards while patches are applied, and
    # qtip/qbase/qparent pseudo-tags.
    class mqrepo(repo.__class__):
        @localrepo.unfilteredpropertycache
        def mq(self):
            # lazily construct the queue object on first access
            return queue(self.ui, self.baseui, self.path)

        def invalidateall(self):
            super(mqrepo, self).invalidateall()
            if localrepo.hasunfilteredcache(self, 'mq'):
                # recreate mq in case queue path was changed
                delattr(self.unfiltered(), 'mq')

        def abortifwdirpatched(self, errmsg, force=False):
            # abort with errmsg if either working-directory parent is an
            # applied mq patch (unless forced or checking is disabled)
            if self.mq.applied and self.mq.checkapplied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if parents[0] in patches or parents[1] in patches:
                    raise error.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            # NOTE(review): mutable default 'extra={}' is shared across
            # calls; it is only passed through here, never mutated
            self.abortifwdirpatched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, pushop):
            # refuse to push non-secret applied patches to a remote
            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                outapplied = [e.node for e in self.mq.applied]
                if pushop.revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on remote already. Filtering any changeset not
                    # pushed.
                    heads = set(pushop.revs)
                    for node in reversed(outapplied):
                        if node in heads:
                            break
                        else:
                            outapplied.pop()
                # looking for pushed and shared changeset
                for node in outapplied:
                    if self[node].phase() < phases.secret:
                        raise error.Abort(_('source has mq patches applied'))
                # no non-secret patches pushed
            super(mqrepo, self).checkpush(pushop)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                # for now ignore filtering business
                self.unfiltered().changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # do not add fake tags for filtered revisions
            included = self.changelog.hasnode
            mqtags = [mqt for mqt in mqtags if included(mqt[0])]
            if not mqtags:
                return result

            # synthesize qtip/qbase/qparent on top of the per-patch tags
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    self.ui.warn(_('tag %s overrides mq patch of the same '
                                   'name\n') % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

    if repo.local():
        repo.__class__ = mqrepo

        repo._phasedefaults.append(mqphasedefaults)
3504 3504
def mqimport(orig, ui, repo, *args, **kwargs):
    """Wrapper for 'hg import': refuse to import over an applied mq patch.

    The guard only fires when the repo has mq support (abortifwdirpatched)
    and the import is actually going to commit (no --no-commit).
    """
    mqaware = util.safehasattr(repo, 'abortifwdirpatched')
    committing = not kwargs.get('no_commit', False)
    if mqaware and committing:
        repo.abortifwdirpatched(_('cannot import over an applied patch'),
                                kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)
3511 3511
def mqinit(orig, ui, *args, **kwargs):
    # Wrapper for 'hg init': with --mq, initialize a patch queue repository
    # for an existing repo instead of creating a new main repository.
    mq = kwargs.pop('mq', None)

    if not mq:
        return orig(ui, *args, **kwargs)

    if args:
        repopath = args[0]
        # queue repos live inside .hg, so only local paths make sense
        if not hg.islocal(repopath):
            raise error.Abort(_('only a local queue repository '
                                'may be initialized'))
    else:
        # no path given: locate the enclosing repository from cwd
        repopath = cmdutil.findrepo(os.getcwd())
        if not repopath:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)
3530 3530
def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""
    # some commands do not like getting unknown options
    usemq = kwargs.pop('mq', None)
    if not usemq:
        return orig(ui, repo, *args, **kwargs)

    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise error.Abort(_('no queue repository'))
    # re-dispatch the command against the queue repository
    return orig(qrepo.ui, qrepo, *args, **kwargs)
3545 3545
def summaryhook(ui, repo):
    # Contribute an "mq:" line to 'hg summary' showing how many patches
    # are applied and unapplied in the queue.
    q = repo.mq
    m = []
    a, u = len(q.applied), len(q.unapplied(repo))
    if a:
        m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
    if u:
        m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
    if m:
        # i18n: column positioning for "hg summary"
        ui.write(_("mq: %s\n") % ', '.join(m))
    else:
        # i18n: column positioning for "hg summary"
        ui.note(_("mq: (empty queue)\n"))
3560 3560
# Delayed registrar for revset predicates defined by this extension;
# the predicates are actually installed when extsetup() calls setup().
revsetpredicate = revset.extpredicate()

@revsetpredicate('mq()')
def revsetmq(repo, subset, x):
    """Changesets managed by MQ.
    """
    revset.getargs(x, 0, 0, _("mq takes no arguments"))
    # revision numbers of all currently applied patches
    applied = set([repo[r.node].rev() for r in repo.mq.applied])
    return revset.baseset([r for r in subset if r in applied])
3568 3570
# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]
3571 3573
def extsetup(ui):
    # Ensure mq wrappers are called first, regardless of extension load order by
    # NOT wrapping in uisetup() and instead deferring to init stage two here.
    mqopt = [('', 'mq', None, _("operate on patch repository"))]

    extensions.wrapcommand(commands.table, 'import', mqimport)
    cmdutil.summaryhooks.add('mq', summaryhook)

    entry = extensions.wrapcommand(commands.table, 'init', mqinit)
    entry[1].extend(mqopt)

    # commands that run without a repo cannot take --mq
    nowrap = set(commands.norepo.split(" "))

    def dotable(cmdtable):
        # add the --mq flag to every repo-using command in cmdtable
        for cmd in cmdtable.keys():
            cmd = cmdutil.parsealiases(cmd)[0]
            if cmd in nowrap:
                continue
            entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
            entry[1].extend(mqopt)

    dotable(commands.table)

    # also wrap commands contributed by other loaded extensions
    for extname, extmodule in extensions.extensions():
        if extmodule.__file__ != __file__:
            dotable(getattr(extmodule, 'cmdtable', {}))

    # register the revset predicates declared with @revsetpredicate above
    revsetpredicate.setup()
3600 3602
# color/effect labels used by this extension's output
colortable = {'qguard.negative': 'red',
              'qguard.positive': 'yellow',
              'qguard.unguarded': 'green',
              'qseries.applied': 'blue bold underline',
              'qseries.guarded': 'black bold',
              'qseries.missing': 'red bold',
              'qseries.unapplied': 'black bold'}
@@ -1,1241 +1,1244 b''
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
18 18 from mercurial import extensions, patch, scmutil, phases, obsolete, error
19 19 from mercurial import copies, repoview, revset
20 20 from mercurial.commands import templateopts
21 21 from mercurial.node import nullrev, nullid, hex, short
22 22 from mercurial.lock import release
23 23 from mercurial.i18n import _
24 24 import os, errno
25 25
# The following constants are used throughout the rebase module. The ordering of
# their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
# revision takes part in a merge that needs no rebase of its own
nullmerge = -2
# revision deliberately left out of the rebase
revignored = -3
# successor in rebase destination
revprecursor = -4
# plain prune (no successor)
revpruned = -5
# states for which the revision is reported but not rebased
revskipped = (revignored, revprecursor, revpruned)
38 38
# command table populated by the @command decorator below
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
46 46
47 47 def _nothingtorebase():
48 48 return 1
49 49
50 50 def _makeextrafn(copiers):
51 51 """make an extrafn out of the given copy-functions.
52 52
53 53 A copy function takes a context and an extra dict, and mutates the
54 54 extra dict as needed based on the given context.
55 55 """
56 56 def extrafn(ctx, extra):
57 57 for c in copiers:
58 58 c(ctx, extra)
59 59 return extrafn
60 60
61 61 def _destrebase(repo):
62 62 # Destination defaults to the latest revision in the
63 63 # current branch
64 64 branch = repo[None].branch()
65 65 return repo[branch].rev()
66 66
# Delayed registrar for revset predicates defined by this extension;
# registration happens when setup() is invoked (see hggettext/extsetup).
revsetpredicate = revset.extpredicate()

@revsetpredicate('_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # # XXX: - probably merging with the merge destination.
    # i18n: "_rebasedefaultdest" is a keyword
    revset.getargs(x, 0, 0, _("_rebasedefaultdest takes no arguments"))
    # restrict the computed default destination to the requested subset
    return subset & revset.baseset([_destrebase(repo)])
78 81
@command('rebase',
    [('s', 'source', '',
     _('rebase the specified changeset and descendants'), _('REV')),
    ('b', 'base', '',
     _('rebase everything from branching point of specified changeset'),
     _('REV')),
    ('r', 'rev', [],
     _('rebase these revisions'),
     _('REV')),
    ('d', 'dest', '',
     _('rebase onto the specified changeset'), _('REV')),
    ('', 'collapse', False, _('collapse the rebased changesets')),
    ('m', 'message', '',
     _('use text as collapse commit message'), _('TEXT')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('l', 'logfile', '',
     _('read collapse commit message from file'), _('FILE')),
    ('k', 'keep', False, _('keep original changesets')),
    ('', 'keepbranches', False, _('keep original branch names')),
    ('D', 'detach', False, _('(DEPRECATED)')),
    ('i', 'interactive', False, _('(DEPRECATED)')),
    ('t', 'tool', '', _('specify merge tool')),
    ('c', 'continue', False, _('continue an interrupted rebase')),
    ('a', 'abort', False, _('abort an interrupted rebase'))] +
     templateopts,
    _('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    Published commits cannot be rebased (see :hg:`help phases`).
    To copy commits, see :hg:`help graft`.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    There are three ways to select changesets::

      1. Explicitly select them using ``--rev``.

      2. Use ``--source`` to select a root changeset and include all of its
         descendants.

      3. Use ``--base`` to select a changeset; rebase will find ancestors
         and their descendants which are not also ancestors of the destination.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.

    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # Mapping between the old revision id and either what is the new rebased
    # revision or what needs to be done with the old revision. The state dict
    # will be what contains most of the rebase progress state.
    state = {}
    skipped = set()
    targetancestors = set()


    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        date = opts.get('date', None)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = []
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            # interactive rebase is delegated to histedit; build a hint
            # pointing at its help, mentioning how to enable it if needed
            # NOTE(review): the local name 'help' shadows the builtin here.
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if collapsemsg and not collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # --continue / --abort: restore a previously stored rebase state
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise error.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state,
                             activebookmark=activebookmark)
        else:
            # fresh rebase: compute destination and the set of revisions
            # to move, then build the initial state mapping
            if srcf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'source and a base'))
            if revf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a base'))
            if revf and srcf:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if destf:
                dest = scmutil.revsingle(repo, destf)
            else:
                dest = repo[_destrebase(repo)]
                destf = str(dest)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return _nothingtorebase()
                commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable to
                    # lists. This should be improved to honor laziness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both "base"'
                                        ' and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working directory '
                                        'parent is also destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') % dest)
                    else: # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base), dest))
                    return _nothingtorebase()

            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise error.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            obsoletenotrebased = {}
            if ui.configbool('experimental', 'rebaseskipobsolete'):
                rebasesetrevs = set(rebaseset)
                obsoletenotrebased = _computeobsoletenotrebased(repo,
                                                                rebasesetrevs,
                                                                dest)

                # - plain prune (no successor) changesets are rebased
                # - split changesets are not rebased if at least one of the
                # changeset resulting from the split is an ancestor of dest
                rebaseset = rebasesetrevs - set(obsoletenotrebased)
                if rebasesetrevs and not rebaseset:
                    msg = _('all requested changesets have equivalents '
                            'or were marked as obsolete')
                    hint = _('to force the rebase, set the config '
                             'experimental.rebaseskipobsolete to False')
                    raise error.Abort(msg, hint=hint)

            result = buildstate(repo, dest, rebaseset, collapsef,
                                obsoletenotrebased)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return _nothingtorebase()

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % repo[root],
                                  hint=_('see "hg help phases" for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf and collapsef:
            branches = set()
            for rev in state:
                branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(_('cannot collapse multiple named '
                                        'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target], inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._activebookmark
        if activebookmark:
            bookmarks.deactivate(repo)

        extrafn = _makeextrafn(extrafns)

        # main loop: process every revision in the state map in revision
        # order, acting according to its recorded state
        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            ctx = repo[rev]
            desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                                   ctx.description().split('\n', 1)[0])
            names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
            if names:
                desc += ' (%s)' % ' '.join(names)
            pos += 1
            if state[rev] == revtodo:
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, target, state,
                                             targetancestors)
                # persist progress so --continue/--abort can pick it up
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo[None].parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, state,
                                           collapsef, target)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform, **opts)
                    newnode = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                           editor=editor,
                                           keepbranches=keepbranchesf,
                                           date=date)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newnode = None
                # Update the state
                if newnode is not None:
                    state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    if not collapsef:
                        ui.warn(_('note: rebase of %d:%s created no changes '
                                  'to commit\n') % (rev, ctx))
                        skipped.add(rev)
                    state[rev] = p1
                    ui.debug('next revision set to %s\n' % p1)
            elif state[rev] == nullmerge:
                ui.debug('ignoring null merge rebase of %s\n' % rev)
            elif state[rev] == revignored:
                ui.status(_('not rebasing ignored %s\n') % desc)
            elif state[rev] == revprecursor:
                targetctx = repo[obsoletenotrebased[rev]]
                desctarget = '%d:%s "%s"' % (targetctx.rev(), targetctx,
                             targetctx.description().split('\n', 1)[0])
                msg = _('note: not rebasing %s, already in destination as %s\n')
                ui.status(msg % (desc, desctarget))
            elif state[rev] == revpruned:
                msg = _('note: not rebasing %s, it has no successor\n')
                ui.status(msg % desc)
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[state[rev]]))

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # --collapse: commit the accumulated working directory state
            # as a single changeset on top of the destination
            p1, p2, _base = defineparents(repo, min(state), target,
                                          state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            newnode = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                   extrafn=extrafn, editor=editor,
                                   keepbranches=keepbranchesf,
                                   date=date)
            if newnode is None:
                newrev = target
            else:
                newrev = repo[newnode].rev()
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, state, skipped, collapsedas)

        tr = None
        try:
            tr = repo.transaction('bookmark')
            if currentbookmarks:
                updatebookmarks(repo, targetnode, nstate, currentbookmarks, tr)
                if activebookmark not in repo._bookmarks:
                    # active bookmark was divergent one and has been deleted
                    activebookmark = None
            tr.close()
        finally:
            release(tr)
        clearstatus(repo)

        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['.'].node() == repo._bookmarks[activebookmark]):
                bookmarks.activate(repo, activebookmark)
    finally:
        release(lock, wlock)
558 561
def externalparent(repo, state, targetancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state is collapsed on top of targetancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        # collect parents that are neither part of the rebase set nor
        # ancestors of the destination
        for p in repo[rev].parents():
            if (p.rev() not in state
                    and p.rev() not in targetancestors):
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(_('unable to collapse on top of %s, there is more '
                        'than one external parent: %s') %
                      (max(targetancestors),
                       ', '.join(str(p) for p in sorted(parents))))
581 584
def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None,
                 keepbranches=False, date=None):
    '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
    but also store useful information in extra.
    Return node of committed revision.'''
    # dirstate guard rolls the dirstate back if the commit does not complete
    dsguard = cmdutil.dirstateguard(repo, 'rebase')
    try:
        repo.setparents(repo[p1].node(), repo[p2].node())
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
        extra = ctx.extra().copy()
        if not keepbranches:
            del extra['branch']
        # record where this changeset came from
        extra['rebase_source'] = ctx.hex()
        if extrafn:
            extrafn(ctx, extra)

        backup = repo.ui.backupconfig('phases', 'new-commit')
        try:
            # never commit below draft, but preserve secret
            targetphase = max(ctx.phase(), phases.draft)
            repo.ui.setconfig('phases', 'new-commit', targetphase, 'rebase')
            if keepbranch:
                repo.ui.setconfig('ui', 'allowemptycommit', True)
            # Commit might fail if unresolved files exist
            if date is None:
                date = ctx.date()
            newnode = repo.commit(text=commitmsg, user=ctx.user(),
                                  date=date, extra=extra, editor=editor)
        finally:
            repo.ui.restoreconfig(backup)

        repo.dirstate.setbranch(repo[newnode].branch())
        dsguard.close()
        return newnode
    finally:
        release(dsguard)
620 623
def rebasenode(repo, rev, p1, base, state, collapse, target):
    'Rebase a single revision rev on top of p1 using base as merge ancestor'
    # Merge phase
    # Update to target and merge it with local
    if repo['.'].rev() != p1:
        repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
        merge.update(repo, p1, False, True)
    else:
        repo.ui.debug(" already in target\n")
    repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = merge.update(repo, rev, True, True, base, collapse,
                         labels=['dest', 'source'])
    if collapse:
        copies.duplicatecopies(repo, rev, target)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, rev, p1rev, skiprev=target)
    return stats
649 652
def nearestrebased(repo, rev, state):
    """return the nearest ancestors of rev in the rebase result"""
    # revisions that have actually been rebased (real new revision numbers)
    already = [r for r in state if state[r] > nullmerge]
    found = repo.revs('max(%ld and (::%d))', already, rev)
    if not found:
        return None
    return state[found.first()]
658 661
def defineparents(repo, rev, target, state, targetancestors):
    'Return the new parent relationship of the revision that will be rebased'
    parents = repo[rev].parents()
    p1 = p2 = nullrev

    # first parent: map it into the rebased history
    p1n = parents[0].rev()
    if p1n in targetancestors:
        p1 = target
    elif p1n in state:
        if state[p1n] == nullmerge:
            p1 = target
        elif state[p1n] in revskipped:
            p1 = nearestrebased(repo, p1n, state)
            if p1 is None:
                p1 = target
        else:
            p1 = state[p1n]
    else: # p1n external
        p1 = target
        p2 = p1n

    # second parent: only relevant when it is not already an ancestor
    # of the destination
    if len(parents) == 2 and parents[1].rev() not in targetancestors:
        p2n = parents[1].rev()
        # interesting second parent
        if p2n in state:
            if p1 == target: # p1n in targetancestors or external
                p1 = state[p2n]
            elif state[p2n] in revskipped:
                p2 = nearestrebased(repo, p2n, state)
                if p2 is None:
                    # no ancestors rebased yet, detach
                    p2 = target
            else:
                p2 = state[p2n]
        else: # p2n external
            if p2 != nullrev: # p1n external too => rev is a merged revision
                raise error.Abort(_('cannot use revision %d as base, result '
                                    'would have 3 parents') % rev)
            p2 = p2n
    repo.ui.debug(" future parents are %d and %d\n" %
                  (repo[p1].rev(), repo[p2].rev()))

    if rev == min(state):
        # Case (1) initial changeset of a non-detaching rebase.
        # Let the merge mechanism find the base itself.
        base = None
    elif not repo[rev].p2():
        # Case (2) detaching the node with a single parent, use this parent
        base = repo[rev].p1().rev()
    else:
        # Assuming there is a p1, this is the case where there also is a p2.
        # We are thus rebasing a merge and need to pick the right merge base.
        #
        # Imagine we have:
        # - M: current rebase revision in this step
        # - A: one parent of M
        # - B: other parent of M
        # - D: destination of this merge step (p1 var)
        #
        # Consider the case where D is a descendant of A or B and the other is
        # 'outside'. In this case, the right merge base is the D ancestor.
        #
        # An informal proof, assuming A is 'outside' and B is the D ancestor:
        #
        # If we pick B as the base, the merge involves:
        # - changes from B to M (actual changeset payload)
        # - changes from B to D (induced by rebase) as D is a rebased
        #   version of B)
        # Which exactly represent the rebase operation.
        #
        # If we pick A as the base, the merge involves:
        # - changes from A to M (actual changeset payload)
        # - changes from A to D (with include changes between unrelated A and B
        #   plus changes induced by rebase)
        # Which does not represent anything sensible and creates a lot of
        # conflicts. A is thus not the right choice - B is.
        #
        # Note: The base found in this 'proof' is only correct in the specified
        # case. This base does not make sense if is not D a descendant of A or B
        # or if the other is not parent 'outside' (especially not if the other
        # parent has been rebased). The current implementation does not
        # make it feasible to consider different cases separately. In these
        # other cases we currently just leave it to the user to correctly
        # resolve an impossible merge using a wrong ancestor.
        for p in repo[rev].parents():
            if state.get(p.rev()) == p1:
                base = p.rev()
                break
        else: # fallback when base not found
            base = None

            # Raise because this function is called wrong (see issue 4106)
            raise AssertionError('no base found to rebase on '
                                 '(defineparents called wrong)')
    return p1, p2, base
754 757
def isagitpatch(repo, patchname):
    '''Return True if the given mq patch is in git extended diff format.

    Scans the patch file under repo.mq.path for a "diff --git" line.
    '''
    mqpatch = os.path.join(repo.mq.path, patchname)
    # The original opened the patch via the file() builtin and never
    # closed it (the early return made the handle leak); close it
    # explicitly in all paths.
    fp = open(mqpatch, 'rb')
    try:
        for line in patch.linereader(fp):
            if line.startswith('diff --git'):
                return True
        return False
    finally:
        fp.close()
762 765
def updatemq(repo, state, skipped, **opts):
    '''Update rebased mq patches - finalize and then import them

    state maps old revision -> new revision; skipped is the set of old
    revisions that produced no new changeset.
    '''
    # old rev -> (patch name, patch is in git format)
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        # turn the applied patches into regular changesets before
        # re-importing their rebased versions
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %s:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=[str(state[rev])])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()
803 806
def updatebookmarks(repo, targetnode, nstate, originalbookmarks, tr):
    '''Move bookmarks to their correct changesets, and delete divergent ones

    nstate maps old node -> new node for every rebased revision; bookmarks
    pointing at an old node are retargeted, and divergent bookmarks made
    obsolete by the move are deleted.  Changes are recorded in transaction tr.
    '''
    bmarks = repo._bookmarks
    for name, oldnode in originalbookmarks.iteritems():
        if oldnode not in nstate:
            continue
        # this bookmark pointed at a rebased revision: retarget it
        bmarks[name] = nstate[oldnode]
        bookmarks.deletedivergent(repo, [targetnode], name)
    bmarks.recordchange(tr)
813 816
def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
                external, activebookmark):
    '''Store the current status to allow recovery

    Writes .hg/rebasestate: seven fixed-position header lines followed by
    one "oldrev:newrev" line per entry in state.  restorestatus() depends
    on this exact order.
    '''
    f = repo.vfs("rebasestate", "w")
    f.write(repo[originalwd].hex() + '\n')
    f.write(repo[target].hex() + '\n')
    f.write(repo[external].hex() + '\n')
    f.write('%d\n' % int(collapse))
    f.write('%d\n' % int(keep))
    f.write('%d\n' % int(keepbranches))
    f.write('%s\n' % (activebookmark or ''))
    for d, v in state.iteritems():
        oldrev = repo[d].hex()
        if v >= 0:
            # already rebased: record the new node
            newrev = repo[v].hex()
        elif v == revtodo:
            # To maintain format compatibility, we have to use nullid.
            # Please do remove this special case when upgrading the format.
            newrev = hex(nullid)
        else:
            # negative sentinel (nullmerge/revignored/...) stored as-is
            newrev = v
        f.write("%s:%s\n" % (oldrev, newrev))
    f.close()
    repo.ui.debug('rebase status stored\n')
838 841
def clearstatus(repo):
    '''Remove the status files

    Also drops the in-memory rebase set used to keep rebased revisions
    visible (see issue4505).
    '''
    _clearrebasesetvisibiliy(repo)
    util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
843 846
def restorestatus(repo):
    '''Restore a previously stored status

    Reads .hg/rebasestate (written by storestatus) and returns the tuple
    (originalwd, target, state, skipped, collapse, keep, keepbranches,
    external, activebookmark).  Raises error.Abort when no rebase is in
    progress or the state file is incomplete.
    '''
    keepbranches = None
    target = None
    collapse = False
    external = nullrev
    activebookmark = None
    state = {}

    try:
        f = repo.vfs("rebasestate")
        try:
            data = f.read()
        finally:
            # the original never closed the state file handle
            f.close()
        for i, l in enumerate(data.splitlines()):
            if i == 0:
                originalwd = repo[l].rev()
            elif i == 1:
                target = repo[l].rev()
            elif i == 2:
                external = repo[l].rev()
            elif i == 3:
                collapse = bool(int(l))
            elif i == 4:
                keep = bool(int(l))
            elif i == 5:
                keepbranches = bool(int(l))
            elif i == 6 and not (len(l) == 81 and ':' in l):
                # line 6 is a recent addition, so for backwards compatibility
                # check that the line doesn't look like the oldrev:newrev lines
                activebookmark = l
            else:
                oldrev, newrev = l.split(':')
                if newrev in (str(nullmerge), str(revignored),
                              str(revprecursor), str(revpruned)):
                    state[repo[oldrev].rev()] = int(newrev)
                elif newrev == hex(nullid):
                    # Legacy compat special case: storestatus encodes revtodo
                    # as hex(nullid).  The old code compared the hex string
                    # against the binary nullid, which never matched; it only
                    # worked by accident because repo[hex(nullid)].rev() is
                    # nullrev (== revtodo in this version).
                    state[repo[oldrev].rev()] = revtodo
                else:
                    state[repo[oldrev].rev()] = repo[newrev].rev()

    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        raise error.Abort(_('no rebase in progress'))

    if keepbranches is None:
        raise error.Abort(_('.hg/rebasestate is incomplete'))

    skipped = set()
    # recompute the set of skipped revs
    if not collapse:
        seen = set([target])
        for old, new in sorted(state.items()):
            if new != revtodo and new in seen:
                skipped.add(old)
            seen.add(new)
    repo.ui.debug('computed skipped revs: %s\n' %
                  (' '.join(str(r) for r in sorted(skipped)) or None))
    repo.ui.debug('rebase status resumed\n')
    _setrebasesetvisibility(repo, state.keys())
    return (originalwd, target, state, skipped,
            collapse, keep, keepbranches, external, activebookmark)
905 908
def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    wdparents = [ctx.rev() for ctx in repo[None].parents()]

    # a single working-directory parent means no merge is in progress
    if len(wdparents) < 2:
        return False

    # during a rebase the working directory sits on the first revision
    # that has not been rebased yet
    firstunrebased = min(old for old, new in state.iteritems()
                         if new == nullrev)
    return firstunrebased in wdparents
922 925
def abort(repo, originalwd, target, state, activebookmark=None):
    '''Restore the repository to its original state.  Additional args:

    activebookmark: the name of the bookmark that should be active after the
    restore

    Returns 0; always clears the rebase state files, even when the rebased
    changesets cannot be stripped.'''

    try:
        # If the first commits in the rebased set get skipped during the rebase,
        # their values within the state mapping will be the target rev id. The
        # dstates list must not contain the target rev (issue4896)
        dstates = [s for s in state.values() if s >= 0 and s != target]
        immutable = [d for d in dstates if not repo[d].mutable()]
        cleanup = True
        if immutable:
            # public changesets cannot be stripped; warn and leave everything
            repo.ui.warn(_("warning: can't clean up public changesets %s\n")
                         % ', '.join(str(repo[r]) for r in immutable),
                         hint=_('see "hg help phases" for details'))
            cleanup = False

        descendants = set()
        if dstates:
            descendants = set(repo.changelog.descendants(dstates))
        if descendants - set(dstates):
            # someone committed on top of the rebased revisions meanwhile
            repo.ui.warn(_("warning: new changesets detected on target branch, "
                           "can't strip\n"))
            cleanup = False

        if cleanup:
            # Update away from the rebase if necessary
            if needupdate(repo, state):
                merge.update(repo, originalwd, False, True)

            # Strip from the first rebased revision
            rebased = filter(lambda x: x >= 0 and x != target, state.values())
            if rebased:
                strippoints = [
                        c.node() for c in repo.set('roots(%ld)', rebased)]
                # no backup of rebased cset versions needed
                repair.strip(repo.ui, repo, strippoints)

        if activebookmark and activebookmark in repo._bookmarks:
            bookmarks.activate(repo, activebookmark)

    finally:
        clearstatus(repo)
        repo.ui.warn(_('rebase aborted\n'))
    return 0
970 973
def buildstate(repo, dest, rebaseset, collapse, obsoletenotrebased):
    '''Define which revisions are going to be rebased and where

    repo: repo
    dest: context
    rebaseset: set of rev
    obsoletenotrebased: mapping of obsolete rev -> successor rev (or None
        for a plain prune), as computed by _computeobsoletenotrebased

    Returns (current working dir rev, dest rev, state mapping), or None when
    there is nothing to do.  state maps each rev to revtodo or one of the
    negative sentinels (nullmerge/revignored/revprecursor/revpruned).
    '''
    _setrebasesetvisibility(repo, rebaseset)

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if 'qtip' in repo.tags() and (dest.node() in
                            [s.node for s in repo.mq.applied]):
        raise error.Abort(_('cannot rebase onto an applied mq patch'))

    roots = list(repo.set('roots(%ld)', rebaseset))
    if not roots:
        raise error.Abort(_('no matching revisions'))
    roots.sort()
    state = {}
    detachset = set()
    for root in roots:
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_('source is ancestor of destination'))
        if commonbase == dest:
            samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and root in dest.children():
                # rebasing a fast-forwardable child onto its parent: no-op
                repo.ui.debug('source is a child of destination\n')
                return None

        repo.ui.debug('rebase onto %d starting from %s\n' % (dest, root))
        state.update(dict.fromkeys(rebaseset, revtodo))
        # Rebase tries to turn <dest> into a parent of <root> while
        # preserving the number of parents of rebased changesets:
        #
        # - A changeset with a single parent will always be rebased as a
        #   changeset with a single parent.
        #
        # - A merge will be rebased as merge unless its parents are both
        #   ancestors of <dest> or are themselves in the rebased set and
        #   pruned while rebased.
        #
        # If one parent of <root> is an ancestor of <dest>, the rebased
        # version of this parent will be <dest>. This is always true with
        # --base option.
        #
        # Otherwise, we need to *replace* the original parents with
        # <dest>. This "detaches" the rebased set from its former location
        # and rebases it onto <dest>. Changes introduced by ancestors of
        # <root> not common with <dest> (the detachset, marked as
        # nullmerge) are "removed" from the rebased changesets.
        #
        # - If <root> has a single parent, set it to <dest>.
        #
        # - If <root> is a merge, we cannot decide which parent to
        #   replace, the rebase operation is not clearly defined.
        #
        # The table below sums up this behavior:
        #
        # +------------------+----------------------+-------------------------+
        # |                  |     one parent       |  merge                  |
        # +------------------+----------------------+-------------------------+
        # | parent in        | new parent is <dest> | parents in ::<dest> are |
        # | ::<dest>         |                      | remapped to <dest>      |
        # +------------------+----------------------+-------------------------+
        # | unrelated source | new parent is <dest> | ambiguous, abort        |
        # +------------------+----------------------+-------------------------+
        #
        # The actual abort is handled by `defineparents`
        if len(root.parents()) <= 1:
            # ancestors of <root> not ancestors of <dest>
            detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
                                                            [root.rev()]))
    for r in detachset:
        if r not in state:
            state[r] = nullmerge
    if len(roots) > 1:
        # If we have multiple roots, we may have "hole" in the rebase set.
        # Rebase roots that descend from those "hole" should not be detached as
        # other root are. We use the special `revignored` to inform rebase that
        # the revision should be ignored but that `defineparents` should search
        # a rebase destination that make sense regarding rebased topology.
        rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
        for ignored in set(rebasedomain) - set(rebaseset):
            state[ignored] = revignored
    for r in obsoletenotrebased:
        if obsoletenotrebased[r] is None:
            state[r] = revpruned
        else:
            state[r] = revprecursor
    return repo['.'].rev(), dest.rev(), state
1064 1067
def clearrebased(ui, repo, state, skipped, collapsedas=None):
    """dispose of rebased revision at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is the
    `collapsedas` node."""
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        # obsolescence markers available: record each rebased revision as
        # superseded by its new incarnation (or by nothing, if skipped)
        # instead of stripping it
        markers = []
        for rev, newrev in sorted(state.items()):
            if newrev >= 0:
                if rev in skipped:
                    succs = ()
                elif collapsedas is not None:
                    succs = (repo[collapsedas],)
                else:
                    succs = (repo[newrev],)
                markers.append((repo[rev], succs))
        if markers:
            obsolete.createmarkers(repo, markers)
    else:
        # no obsolescence support: strip the originals, unless new
        # changesets were committed on top of them in the meantime
        rebased = [rev for rev in state if state[rev] > nullmerge]
        if rebased:
            stripped = []
            for root in repo.set('roots(%ld)', rebased):
                if set(repo.changelog.descendants([root.rev()])) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    stripped.append(root.node())
            if stripped:
                # backup the old csets by default
                repair.strip(ui, repo, stripped, "all")
1096 1099
1097 1100
def pullrebase(orig, ui, repo, *args, **opts):
    '''Call rebase after pull if the latter has been invoked with --rebase

    Wrapper installed around the pull command by uisetup().'''
    ret = None
    if opts.get('rebase'):
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')

            movemarkfrom = repo['.'].node()
            revsprepull = len(repo)
            # temporarily silence postincoming so pull doesn't print/update;
            # restored in the finally below
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # --rev option from pull conflict with rebase own --rev
                # dropping it
                if 'rev' in opts:
                    del opts['rev']
                # positional argument from pull conflicts with rebase's own
                # --source.
                if 'source' in opts:
                    del opts['source']
                rebase(ui, repo, **opts)
                branch = repo[None].branch()
                dest = repo[branch].rev()
                if dest != repo['.'].rev():
                    # there was nothing to rebase we force an update
                    hg.update(repo, dest)
                if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                    ui.status(_("updating bookmark %s\n")
                              % repo._activebookmark)
        finally:
            release(lock, wlock)
    else:
        if opts.get('tool'):
            raise error.Abort(_('--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret
1148 1151
1149 1152 def _setrebasesetvisibility(repo, revs):
1150 1153 """store the currently rebased set on the repo object
1151 1154
1152 1155 This is used by another function to prevent rebased revision to because
1153 1156 hidden (see issue4505)"""
1154 1157 repo = repo.unfiltered()
1155 1158 revs = set(revs)
1156 1159 repo._rebaseset = revs
1157 1160 # invalidate cache if visibility changes
1158 1161 hiddens = repo.filteredrevcache.get('visible', set())
1159 1162 if revs & hiddens:
1160 1163 repo.invalidatevolatilesets()
1161 1164
1162 1165 def _clearrebasesetvisibiliy(repo):
1163 1166 """remove rebaseset data from the repo"""
1164 1167 repo = repo.unfiltered()
1165 1168 if '_rebaseset' in vars(repo):
1166 1169 del repo._rebaseset
1167 1170
1168 1171 def _rebasedvisible(orig, repo):
1169 1172 """ensure rebased revs stay visible (see issue4505)"""
1170 1173 blockers = orig(repo)
1171 1174 blockers.update(getattr(repo, '_rebaseset', ()))
1172 1175 return blockers
1173 1176
def _computeobsoletenotrebased(repo, rebasesetrevs, dest):
    """return a mapping obsolete => successor for all obsolete nodes to be
    rebased that have a successors in the destination

    obsolete => None entries in the mapping indicate nodes with no succesor"""
    obsoletenotrebased = {}

    # Build a mapping successor => obsolete nodes for the obsolete
    # nodes to be rebased
    allsuccessors = {}
    cl = repo.changelog
    for r in rebasesetrevs:
        n = repo[r]
        if n.obsolete():
            node = cl.node(r)
            for s in obsolete.allsuccessors(repo.obsstore, [node]):
                try:
                    allsuccessors[cl.rev(s)] = cl.rev(node)
                except LookupError:
                    # successor is unknown locally
                    pass

    if allsuccessors:
        # Look for successors of obsolete nodes to be rebased among
        # the ancestors of dest
        ancs = cl.ancestors([repo[dest].rev()],
                            stoprev=min(allsuccessors),
                            inclusive=True)
        # Precompute how many obsolete revisions share each successor;
        # the original called allsuccessors.values().count(s) inside the
        # loop, which made the detection O(n**2).
        succcount = {}
        for v in allsuccessors.itervalues():
            succcount[v] = succcount.get(v, 0) + 1
        for s in allsuccessors:
            if s in ancs:
                obsoletenotrebased[allsuccessors[s]] = s
            elif (s == allsuccessors[s] and
                  succcount.get(s, 0) == 1):
                # plain prune
                obsoletenotrebased[s] = None

    return obsoletenotrebased
1210 1213
def summaryhook(ui, repo):
    # contribute a "rebase:" line to "hg summary" while a rebase is
    # interrupted; silently does nothing when no rebasestate file exists
    if not os.path.exists(repo.join('rebasestate')):
        return
    try:
        # state (the third element) maps rev -> new rev or sentinel
        state = restorestatus(repo)[2]
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
        ui.write(msg)
        return
    numrebased = len([i for i in state.itervalues() if i >= 0])
    # i18n: column positioning for "hg summary"
    ui.write(_('rebase: %s, %s (rebase --continue)\n') %
             (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
              ui.label(_('%d remaining'), 'rebase.remaining') %
              (len(state) - numrebased)))
1227 1230
def uisetup(ui):
    # Replace pull with a decorator to provide --rebase option
    entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
    entry[1].append(('', 'rebase', None,
                     _("rebase working directory to branch head")))
    entry[1].append(('t', 'tool', '',
                     _("specify merge tool for rebase")))
    cmdutil.summaryhooks.add('rebase', summaryhook)
    cmdutil.unfinishedstates.append(
        ['rebasestate', False, False, _('rebase in progress'),
         _("use 'hg rebase --continue' or 'hg rebase --abort'")])
    # ensure rebased rev are not hidden
    extensions.wrapfunction(repoview, '_getdynamicblockers', _rebasedvisible)
    # register this extension's revset predicates (delayed registration)
    revsetpredicate.setup()
@@ -1,721 +1,723 b''
1 1 # Patch transplanting extension for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to transplant changesets from another branch
9 9
10 10 This extension allows you to transplant changes to another parent revision,
11 11 possibly in another repository. The transplant is done using 'diff' patches.
12 12
13 13 Transplanted patches are recorded in .hg/transplant/transplants, as a
14 14 map from a changeset hash to its hash in the source repository.
15 15 '''
16 16
17 17 from mercurial.i18n import _
18 18 import os, tempfile
19 19 from mercurial.node import short
20 20 from mercurial import bundlerepo, hg, merge, match
21 21 from mercurial import patch, revlog, scmutil, util, error, cmdutil
22 22 from mercurial import revset, templatekw, exchange
23 23 from mercurial import lock as lockmod
24 24
class TransplantError(error.Abort):
    '''Raised when a transplant fails mid-apply and the user must fix the
    merge and run "hg transplant --continue" (no rollback is performed).'''
    pass
27 27
# command table for this extension, populated by the @command decorator
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
35 35
class transplantentry(object):
    '''A single recorded transplant: local node ``lnode`` created from the
    source-repository node ``rnode``.'''
    def __init__(self, lnode, rnode):
        self.lnode, self.rnode = lnode, rnode
40 40
class transplants(object):
    '''Persistent map of transplanted changesets.

    Maps a source (remote) node to a list of transplantentry objects,
    stored one "lnode:rnode" per line in ``transplantfile`` under ``path``.
    '''
    def __init__(self, path=None, transplantfile=None, opener=None):
        self.path = path
        self.transplantfile = transplantfile
        self.opener = opener

        if not opener:
            self.opener = scmutil.opener(self.path)
        self.transplants = {}
        self.dirty = False
        self.read()

    def read(self):
        '''load the map from disk; a missing or unset file is a no-op'''
        if not self.transplantfile:
            # guard before joining: the original passed a None
            # transplantfile straight to os.path.join, which raises
            return
        abspath = os.path.join(self.path, self.transplantfile)
        if os.path.exists(abspath):
            for line in self.opener.read(self.transplantfile).splitlines():
                lnode, rnode = map(revlog.bin, line.split(':'))
                # 'entries' instead of shadowing the list builtin
                entries = self.transplants.setdefault(rnode, [])
                entries.append(transplantentry(lnode, rnode))

    def write(self):
        '''persist the map if dirty, creating the state dir on demand'''
        if self.dirty and self.transplantfile:
            if not os.path.isdir(self.path):
                os.mkdir(self.path)
            fp = self.opener(self.transplantfile, 'w')
            for entries in self.transplants.itervalues():
                for t in entries:
                    l, r = map(revlog.hex, (t.lnode, t.rnode))
                    fp.write(l + ':' + r + '\n')
            fp.close()
        self.dirty = False

    def get(self, rnode):
        '''list of transplantentry recorded for source node rnode'''
        return self.transplants.get(rnode) or []

    def set(self, lnode, rnode):
        '''record that source node rnode was transplanted as lnode'''
        entries = self.transplants.setdefault(rnode, [])
        entries.append(transplantentry(lnode, rnode))
        self.dirty = True

    def remove(self, transplant):
        '''forget a previously recorded transplant (e.g. after a strip)'''
        entries = self.transplants.get(transplant.rnode)
        if entries:
            del entries[entries.index(transplant)]
            self.dirty = True
86 86
87 87 class transplanter(object):
    def __init__(self, ui, repo, opts):
        self.ui = ui
        # per-repo transplant state lives under .hg/transplant/
        self.path = repo.join('transplant')
        self.opener = scmutil.opener(self.path)
        self.transplants = transplants(self.path, 'transplants',
                                       opener=self.opener)
        def getcommiteditor():
            # resolve the commit editor lazily, using the 'transplant'
            # edit form for the current working context
            editform = cmdutil.mergeeditform(repo[None], 'transplant')
            return cmdutil.getcommiteditor(editform=editform, **opts)
        self.getcommiteditor = getcommiteditor
98 98
    def applied(self, repo, node, parent):
        '''returns True if a node is already an ancestor of parent
        or is parent or has already been transplanted'''
        if hasnode(repo, parent):
            parentrev = repo.changelog.rev(parent)
            if hasnode(repo, node):
                rev = repo.changelog.rev(node)
                # inclusive=True makes node == parent count as applied
                reachable = repo.changelog.ancestors([parentrev], rev,
                                                     inclusive=True)
                if rev in reachable:
                    return True
            for t in self.transplants.get(node):
                # it might have been stripped
                if not hasnode(repo, t.lnode):
                    self.transplants.remove(t)
                    return False
                lnoderev = repo.changelog.rev(t.lnode)
                if lnoderev in repo.changelog.ancestors([parentrev], lnoderev,
                                                        inclusive=True):
                    return True
        return False
120 120
    def apply(self, repo, source, revmap, merges, opts=None):
        '''apply the revisions in revmap one by one in revision order

        revmap maps source rev -> source node; it is mutated (entries are
        deleted as they are processed) so that saveseries() in the finally
        clause records only the remaining work.  merges is the set of merge
        nodes to be transplanted as merges.
        '''
        if opts is None:
            opts = {}
        revs = sorted(revmap)
        p1, p2 = repo.dirstate.parents()
        pulls = []
        diffopts = patch.difffeatureopts(self.ui, opts)
        diffopts.git = True

        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('transplant')
            for rev in revs:
                node = revmap[rev]
                revstr = '%s:%s' % (rev, short(node))

                if self.applied(repo, node, p1):
                    self.ui.warn(_('skipping already applied revision %s\n') %
                                 revstr)
                    continue

                parents = source.changelog.parents(node)
                if not (opts.get('filter') or opts.get('log')):
                    # If the changeset parent is the same as the
                    # wdir's parent, just pull it.
                    if parents[0] == p1:
                        pulls.append(node)
                        p1 = node
                        continue
                    if pulls:
                        if source != repo:
                            exchange.pull(repo, source.peer(), heads=pulls)
                        merge.update(repo, pulls[-1], False, False)
                        p1, p2 = repo.dirstate.parents()
                        pulls = []

                domerge = False
                if node in merges:
                    # pulling all the merge revs at once would mean we
                    # couldn't transplant after the latest even if
                    # transplants before them fail.
                    domerge = True
                    if not hasnode(repo, node):
                        exchange.pull(repo, source.peer(), heads=[node])

                skipmerge = False
                if parents[1] != revlog.nullid:
                    # merge changeset: only transplantable when --parent
                    # selects which parent to diff against
                    if not opts.get('parent'):
                        self.ui.note(_('skipping merge changeset %s:%s\n')
                                     % (rev, short(node)))
                        skipmerge = True
                    else:
                        parent = source.lookup(opts['parent'])
                        if parent not in parents:
                            raise error.Abort(_('%s is not a parent of %s') %
                                              (short(parent), short(node)))
                else:
                    parent = parents[0]

                if skipmerge:
                    patchfile = None
                else:
                    # write the changeset's diff to a temporary patch file
                    fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
                    fp = os.fdopen(fd, 'w')
                    gen = patch.diff(source, parent, node, opts=diffopts)
                    for chunk in gen:
                        fp.write(chunk)
                    fp.close()

                del revmap[rev]
                if patchfile or domerge:
                    try:
                        try:
                            n = self.applyone(repo, node,
                                              source.changelog.read(node),
                                              patchfile, merge=domerge,
                                              log=opts.get('log'),
                                              filter=opts.get('filter'))
                        except TransplantError:
                            # Do not rollback, it is up to the user to
                            # fix the merge or cancel everything
                            tr.close()
                            raise
                        if n and domerge:
                            self.ui.status(_('%s merged at %s\n') % (revstr,
                                      short(n)))
                        elif n:
                            self.ui.status(_('%s transplanted to %s\n')
                                           % (short(node),
                                              short(n)))
                    finally:
                        if patchfile:
                            os.unlink(patchfile)
            tr.close()
            if pulls:
                exchange.pull(repo, source.peer(), heads=pulls)
                merge.update(repo, pulls[-1], False, False)
        finally:
            self.saveseries(revmap, merges)
            self.transplants.write()
            if tr:
                tr.release()
            if lock:
                lock.release()
227 227
228 228 def filter(self, filter, node, changelog, patchfile):
229 229 '''arbitrarily rewrite changeset before applying it'''
230 230
231 231 self.ui.status(_('filtering %s\n') % patchfile)
232 232 user, date, msg = (changelog[1], changelog[2], changelog[4])
233 233 fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
234 234 fp = os.fdopen(fd, 'w')
235 235 fp.write("# HG changeset patch\n")
236 236 fp.write("# User %s\n" % user)
237 237 fp.write("# Date %d %d\n" % date)
238 238 fp.write(msg + '\n')
239 239 fp.close()
240 240
241 241 try:
242 242 self.ui.system('%s %s %s' % (filter, util.shellquote(headerfile),
243 243 util.shellquote(patchfile)),
244 244 environ={'HGUSER': changelog[1],
245 245 'HGREVISION': revlog.hex(node),
246 246 },
247 247 onerr=error.Abort, errprefix=_('filter failed'))
248 248 user, date, msg = self.parselog(file(headerfile))[1:4]
249 249 finally:
250 250 os.unlink(headerfile)
251 251
252 252 return (user, date, msg)
253 253
    def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
                 filter=None):
        '''apply the patch in patchfile to the repository as a transplant

        cl is the raw changelog entry for node.  Returns the new node, or
        None when the patch produced an empty changeset.  Raises
        TransplantError when the patch fails to apply cleanly.
        '''
        (manifest, user, (time, timezone), files, message) = cl[:5]
        date = "%d %d" % (time, timezone)
        # mark the new changeset with its origin
        extra = {'transplant_source': node}
        if filter:
            (user, date, message) = self.filter(filter, node, cl, patchfile)

        if log:
            # we don't translate messages inserted into commits
            message += '\n(transplanted from %s)' % revlog.hex(node)

        self.ui.status(_('applying %s\n') % short(node))
        self.ui.note('%s %s\n%s\n' % (user, date, message))

        if not patchfile and not merge:
            raise error.Abort(_('can only omit patchfile if merging'))
        if patchfile:
            try:
                files = set()
                patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
                files = list(files)
            except Exception as inst:
                # record enough state for --continue before bailing out
                seriespath = os.path.join(self.path, 'series')
                if os.path.exists(seriespath):
                    os.unlink(seriespath)
                p1 = repo.dirstate.p1()
                p2 = node
                self.log(user, date, message, p1, p2, merge=merge)
                self.ui.write(str(inst) + '\n')
                raise TransplantError(_('fix up the merge and run '
                                        'hg transplant --continue'))
        else:
            files = None
        if merge:
            p1, p2 = repo.dirstate.parents()
            repo.setparents(p1, node)
            m = match.always(repo.root, '')
        else:
            m = match.exact(repo.root, '', files)

        n = repo.commit(message, user, date, extra=extra, match=m,
                        editor=self.getcommiteditor())
        if not n:
            self.ui.warn(_('skipping emptied changeset %s\n') % short(node))
            return None
        if not merge:
            self.transplants.set(n, node)

        return n
305 305
306 306 def resume(self, repo, source, opts):
307 307 '''recover last transaction and apply remaining changesets'''
308 308 if os.path.exists(os.path.join(self.path, 'journal')):
309 309 n, node = self.recover(repo, source, opts)
310 310 if n:
311 311 self.ui.status(_('%s transplanted as %s\n') % (short(node),
312 312 short(n)))
313 313 else:
314 314 self.ui.status(_('%s skipped due to empty diff\n')
315 315 % (short(node),))
316 316 seriespath = os.path.join(self.path, 'series')
317 317 if not os.path.exists(seriespath):
318 318 self.transplants.write()
319 319 return
320 320 nodes, merges = self.readseries()
321 321 revmap = {}
322 322 for n in nodes:
323 323 revmap[source.changelog.rev(n)] = n
324 324 os.unlink(seriespath)
325 325
326 326 self.apply(repo, source, revmap, merges, opts)
327 327
328 328 def recover(self, repo, source, opts):
329 329 '''commit working directory using journal metadata'''
330 330 node, user, date, message, parents = self.readlog()
331 331 merge = False
332 332
333 333 if not user or not date or not message or not parents[0]:
334 334 raise error.Abort(_('transplant log file is corrupt'))
335 335
336 336 parent = parents[0]
337 337 if len(parents) > 1:
338 338 if opts.get('parent'):
339 339 parent = source.lookup(opts['parent'])
340 340 if parent not in parents:
341 341 raise error.Abort(_('%s is not a parent of %s') %
342 342 (short(parent), short(node)))
343 343 else:
344 344 merge = True
345 345
346 346 extra = {'transplant_source': node}
347 347 try:
348 348 p1, p2 = repo.dirstate.parents()
349 349 if p1 != parent:
350 350 raise error.Abort(_('working directory not at transplant '
351 351 'parent %s') % revlog.hex(parent))
352 352 if merge:
353 353 repo.setparents(p1, parents[1])
354 354 modified, added, removed, deleted = repo.status()[:4]
355 355 if merge or modified or added or removed or deleted:
356 356 n = repo.commit(message, user, date, extra=extra,
357 357 editor=self.getcommiteditor())
358 358 if not n:
359 359 raise error.Abort(_('commit failed'))
360 360 if not merge:
361 361 self.transplants.set(n, node)
362 362 else:
363 363 n = None
364 364 self.unlog()
365 365
366 366 return n, node
367 367 finally:
368 368 # TODO: get rid of this meaningless try/finally enclosing.
369 369 # this is kept only to reduce changes in a patch.
370 370 pass
371 371
372 372 def readseries(self):
373 373 nodes = []
374 374 merges = []
375 375 cur = nodes
376 376 for line in self.opener.read('series').splitlines():
377 377 if line.startswith('# Merges'):
378 378 cur = merges
379 379 continue
380 380 cur.append(revlog.bin(line))
381 381
382 382 return (nodes, merges)
383 383
384 384 def saveseries(self, revmap, merges):
385 385 if not revmap:
386 386 return
387 387
388 388 if not os.path.isdir(self.path):
389 389 os.mkdir(self.path)
390 390 series = self.opener('series', 'w')
391 391 for rev in sorted(revmap):
392 392 series.write(revlog.hex(revmap[rev]) + '\n')
393 393 if merges:
394 394 series.write('# Merges\n')
395 395 for m in merges:
396 396 series.write(revlog.hex(m) + '\n')
397 397 series.close()
398 398
399 399 def parselog(self, fp):
400 400 parents = []
401 401 message = []
402 402 node = revlog.nullid
403 403 inmsg = False
404 404 user = None
405 405 date = None
406 406 for line in fp.read().splitlines():
407 407 if inmsg:
408 408 message.append(line)
409 409 elif line.startswith('# User '):
410 410 user = line[7:]
411 411 elif line.startswith('# Date '):
412 412 date = line[7:]
413 413 elif line.startswith('# Node ID '):
414 414 node = revlog.bin(line[10:])
415 415 elif line.startswith('# Parent '):
416 416 parents.append(revlog.bin(line[9:]))
417 417 elif not line.startswith('# '):
418 418 inmsg = True
419 419 message.append(line)
420 420 if None in (user, date):
421 421 raise error.Abort(_("filter corrupted changeset (no user or date)"))
422 422 return (node, user, date, '\n'.join(message), parents)
423 423
424 424 def log(self, user, date, message, p1, p2, merge=False):
425 425 '''journal changelog metadata for later recover'''
426 426
427 427 if not os.path.isdir(self.path):
428 428 os.mkdir(self.path)
429 429 fp = self.opener('journal', 'w')
430 430 fp.write('# User %s\n' % user)
431 431 fp.write('# Date %s\n' % date)
432 432 fp.write('# Node ID %s\n' % revlog.hex(p2))
433 433 fp.write('# Parent ' + revlog.hex(p1) + '\n')
434 434 if merge:
435 435 fp.write('# Parent ' + revlog.hex(p2) + '\n')
436 436 fp.write(message.rstrip() + '\n')
437 437 fp.close()
438 438
439 439 def readlog(self):
440 440 return self.parselog(self.opener('journal'))
441 441
442 442 def unlog(self):
443 443 '''remove changelog journal'''
444 444 absdst = os.path.join(self.path, 'journal')
445 445 if os.path.exists(absdst):
446 446 os.unlink(absdst)
447 447
448 448 def transplantfilter(self, repo, source, root):
449 449 def matchfn(node):
450 450 if self.applied(repo, node, root):
451 451 return False
452 452 if source.changelog.parents(node)[1] != revlog.nullid:
453 453 return False
454 454 extra = source.changelog.read(node)[5]
455 455 cnode = extra.get('transplant_source')
456 456 if cnode and self.applied(repo, cnode, root):
457 457 return False
458 458 return True
459 459
460 460 return matchfn
461 461
462 462 def hasnode(repo, node):
463 463 try:
464 464 return repo.changelog.rev(node) is not None
465 465 except error.RevlogError:
466 466 return False
467 467
468 468 def browserevs(ui, repo, nodes, opts):
469 469 '''interactively transplant changesets'''
470 470 displayer = cmdutil.show_changeset(ui, repo, opts)
471 471 transplants = []
472 472 merges = []
473 473 prompt = _('apply changeset? [ynmpcq?]:'
474 474 '$$ &yes, transplant this changeset'
475 475 '$$ &no, skip this changeset'
476 476 '$$ &merge at this changeset'
477 477 '$$ show &patch'
478 478 '$$ &commit selected changesets'
479 479 '$$ &quit and cancel transplant'
480 480 '$$ &? (show this help)')
481 481 for node in nodes:
482 482 displayer.show(repo[node])
483 483 action = None
484 484 while not action:
485 485 action = 'ynmpcq?'[ui.promptchoice(prompt)]
486 486 if action == '?':
487 487 for c, t in ui.extractchoices(prompt)[1]:
488 488 ui.write('%s: %s\n' % (c, t))
489 489 action = None
490 490 elif action == 'p':
491 491 parent = repo.changelog.parents(node)[0]
492 492 for chunk in patch.diff(repo, parent, node):
493 493 ui.write(chunk)
494 494 action = None
495 495 if action == 'y':
496 496 transplants.append(node)
497 497 elif action == 'm':
498 498 merges.append(node)
499 499 elif action == 'c':
500 500 break
501 501 elif action == 'q':
502 502 transplants = ()
503 503 merges = ()
504 504 break
505 505 displayer.close()
506 506 return (transplants, merges)
507 507
508 508 @command('transplant',
509 509 [('s', 'source', '', _('transplant changesets from REPO'), _('REPO')),
510 510 ('b', 'branch', [], _('use this source changeset as head'), _('REV')),
511 511 ('a', 'all', None, _('pull all changesets up to the --branch revisions')),
512 512 ('p', 'prune', [], _('skip over REV'), _('REV')),
513 513 ('m', 'merge', [], _('merge at REV'), _('REV')),
514 514 ('', 'parent', '',
515 515 _('parent to choose when transplanting merge'), _('REV')),
516 516 ('e', 'edit', False, _('invoke editor on commit messages')),
517 517 ('', 'log', None, _('append transplant info to log message')),
518 518 ('c', 'continue', None, _('continue last transplant session '
519 519 'after fixing conflicts')),
520 520 ('', 'filter', '',
521 521 _('filter changesets through command'), _('CMD'))],
522 522 _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
523 523 '[-m REV] [REV]...'))
524 524 def transplant(ui, repo, *revs, **opts):
525 525 '''transplant changesets from another branch
526 526
527 527 Selected changesets will be applied on top of the current working
528 528 directory with the log of the original changeset. The changesets
529 529 are copied and will thus appear twice in the history with different
530 530 identities.
531 531
532 532 Consider using the graft command if everything is inside the same
533 533 repository - it will use merges and will usually give a better result.
534 534 Use the rebase extension if the changesets are unpublished and you want
535 535 to move them instead of copying them.
536 536
537 537 If --log is specified, log messages will have a comment appended
538 538 of the form::
539 539
540 540 (transplanted from CHANGESETHASH)
541 541
542 542 You can rewrite the changelog message with the --filter option.
543 543 Its argument will be invoked with the current changelog message as
544 544 $1 and the patch as $2.
545 545
546 546 --source/-s specifies another repository to use for selecting changesets,
547 547 just as if it temporarily had been pulled.
548 548 If --branch/-b is specified, these revisions will be used as
549 549 heads when deciding which changesets to transplant, just as if only
550 550 these revisions had been pulled.
551 551 If --all/-a is specified, all the revisions up to the heads specified
552 552 with --branch will be transplanted.
553 553
554 554 Example:
555 555
556 556 - transplant all changes up to REV on top of your current revision::
557 557
558 558 hg transplant --branch REV --all
559 559
560 560 You can optionally mark selected transplanted changesets as merge
561 561 changesets. You will not be prompted to transplant any ancestors
562 562 of a merged transplant, and you can merge descendants of them
563 563 normally instead of transplanting them.
564 564
565 565 Merge changesets may be transplanted directly by specifying the
566 566 proper parent changeset by calling :hg:`transplant --parent`.
567 567
568 568 If no merges or revisions are provided, :hg:`transplant` will
569 569 start an interactive changeset browser.
570 570
571 571 If a changeset application fails, you can fix the merge by hand
572 572 and then resume where you left off by calling :hg:`transplant
573 573 --continue/-c`.
574 574 '''
575 575 wlock = None
576 576 try:
577 577 wlock = repo.wlock()
578 578 return _dotransplant(ui, repo, *revs, **opts)
579 579 finally:
580 580 lockmod.release(wlock)
581 581
582 582 def _dotransplant(ui, repo, *revs, **opts):
583 583 def incwalk(repo, csets, match=util.always):
584 584 for node in csets:
585 585 if match(node):
586 586 yield node
587 587
588 588 def transplantwalk(repo, dest, heads, match=util.always):
589 589 '''Yield all nodes that are ancestors of a head but not ancestors
590 590 of dest.
591 591 If no heads are specified, the heads of repo will be used.'''
592 592 if not heads:
593 593 heads = repo.heads()
594 594 ancestors = []
595 595 ctx = repo[dest]
596 596 for head in heads:
597 597 ancestors.append(ctx.ancestor(repo[head]).node())
598 598 for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
599 599 if match(node):
600 600 yield node
601 601
602 602 def checkopts(opts, revs):
603 603 if opts.get('continue'):
604 604 if opts.get('branch') or opts.get('all') or opts.get('merge'):
605 605 raise error.Abort(_('--continue is incompatible with '
606 606 '--branch, --all and --merge'))
607 607 return
608 608 if not (opts.get('source') or revs or
609 609 opts.get('merge') or opts.get('branch')):
610 610 raise error.Abort(_('no source URL, branch revision, or revision '
611 611 'list provided'))
612 612 if opts.get('all'):
613 613 if not opts.get('branch'):
614 614 raise error.Abort(_('--all requires a branch revision'))
615 615 if revs:
616 616 raise error.Abort(_('--all is incompatible with a '
617 617 'revision list'))
618 618
619 619 checkopts(opts, revs)
620 620
621 621 if not opts.get('log'):
622 622 # deprecated config: transplant.log
623 623 opts['log'] = ui.config('transplant', 'log')
624 624 if not opts.get('filter'):
625 625 # deprecated config: transplant.filter
626 626 opts['filter'] = ui.config('transplant', 'filter')
627 627
628 628 tp = transplanter(ui, repo, opts)
629 629
630 630 cmdutil.checkunfinished(repo)
631 631 p1, p2 = repo.dirstate.parents()
632 632 if len(repo) > 0 and p1 == revlog.nullid:
633 633 raise error.Abort(_('no revision checked out'))
634 634 if not opts.get('continue'):
635 635 if p2 != revlog.nullid:
636 636 raise error.Abort(_('outstanding uncommitted merges'))
637 637 m, a, r, d = repo.status()[:4]
638 638 if m or a or r or d:
639 639 raise error.Abort(_('outstanding local changes'))
640 640
641 641 sourcerepo = opts.get('source')
642 642 if sourcerepo:
643 643 peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
644 644 heads = map(peer.lookup, opts.get('branch', ()))
645 645 target = set(heads)
646 646 for r in revs:
647 647 try:
648 648 target.add(peer.lookup(r))
649 649 except error.RepoError:
650 650 pass
651 651 source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer,
652 652 onlyheads=sorted(target), force=True)
653 653 else:
654 654 source = repo
655 655 heads = map(source.lookup, opts.get('branch', ()))
656 656 cleanupfn = None
657 657
658 658 try:
659 659 if opts.get('continue'):
660 660 tp.resume(repo, source, opts)
661 661 return
662 662
663 663 tf = tp.transplantfilter(repo, source, p1)
664 664 if opts.get('prune'):
665 665 prune = set(source.lookup(r)
666 666 for r in scmutil.revrange(source, opts.get('prune')))
667 667 matchfn = lambda x: tf(x) and x not in prune
668 668 else:
669 669 matchfn = tf
670 670 merges = map(source.lookup, opts.get('merge', ()))
671 671 revmap = {}
672 672 if revs:
673 673 for r in scmutil.revrange(source, revs):
674 674 revmap[int(r)] = source.lookup(r)
675 675 elif opts.get('all') or not merges:
676 676 if source != repo:
677 677 alltransplants = incwalk(source, csets, match=matchfn)
678 678 else:
679 679 alltransplants = transplantwalk(source, p1, heads,
680 680 match=matchfn)
681 681 if opts.get('all'):
682 682 revs = alltransplants
683 683 else:
684 684 revs, newmerges = browserevs(ui, source, alltransplants, opts)
685 685 merges.extend(newmerges)
686 686 for r in revs:
687 687 revmap[source.changelog.rev(r)] = r
688 688 for r in merges:
689 689 revmap[source.changelog.rev(r)] = r
690 690
691 691 tp.apply(repo, source, revmap, merges, opts)
692 692 finally:
693 693 if cleanupfn:
694 694 cleanupfn()
695 695
696 revsetpredicate = revset.extpredicate()
697
698 @revsetpredicate('transplanted([set])')
696 699 def revsettransplanted(repo, subset, x):
697 """``transplanted([set])``
698 Transplanted changesets in set, or all transplanted changesets.
700 """Transplanted changesets in set, or all transplanted changesets.
699 701 """
700 702 if x:
701 703 s = revset.getset(repo, subset, x)
702 704 else:
703 705 s = subset
704 706 return revset.baseset([r for r in s if
705 707 repo[r].extra().get('transplant_source')])
706 708
707 709 def kwtransplanted(repo, ctx, **args):
708 710 """:transplanted: String. The node identifier of the transplanted
709 711 changeset if any."""
710 712 n = ctx.extra().get('transplant_source')
711 713 return n and revlog.hex(n) or ''
712 714
713 715 def extsetup(ui):
714 revset.symbols['transplanted'] = revsettransplanted
716 revsetpredicate.setup()
715 717 templatekw.keywords['transplanted'] = kwtransplanted
716 718 cmdutil.unfinishedstates.append(
717 719 ['series', True, False, _('transplant in progress'),
718 720 _("use 'hg transplant --continue' or 'hg update' to abort")])
719 721
720 722 # tell hggettext to extract docstrings from these functions:
721 723 i18nfunctions = [revsettransplanted, kwtransplanted]
@@ -1,3705 +1,3730 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
303 303 def parseerrordetail(inst):
304 304 """Compose error message from specified ParseError object
305 305 """
306 306 if len(inst.args) > 1:
307 307 return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308 else:
309 309 return inst.args[0]
310 310
311 311 # helpers
312 312
313 313 def getstring(x, err):
314 314 if x and (x[0] == 'string' or x[0] == 'symbol'):
315 315 return x[1]
316 316 raise error.ParseError(err)
317 317
318 318 def getlist(x):
319 319 if not x:
320 320 return []
321 321 if x[0] == 'list':
322 322 return getlist(x[1]) + [x[2]]
323 323 return [x]
324 324
325 325 def getargs(x, min, max, err):
326 326 l = getlist(x)
327 327 if len(l) < min or (max >= 0 and len(l) > max):
328 328 raise error.ParseError(err)
329 329 return l
330 330
331 331 def getargsdict(x, funcname, keys):
332 332 return parser.buildargsdict(getlist(x), funcname, keys.split(),
333 333 keyvaluenode='keyvalue', keynode='symbol')
334 334
335 335 def isvalidsymbol(tree):
336 336 """Examine whether specified ``tree`` is valid ``symbol`` or not
337 337 """
338 338 return tree[0] == 'symbol' and len(tree) > 1
339 339
340 340 def getsymbol(tree):
341 341 """Get symbol name from valid ``symbol`` in ``tree``
342 342
343 343 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
344 344 """
345 345 return tree[1]
346 346
347 347 def isvalidfunc(tree):
348 348 """Examine whether specified ``tree`` is valid ``func`` or not
349 349 """
350 350 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
351 351
352 352 def getfuncname(tree):
353 353 """Get function name from valid ``func`` in ``tree``
354 354
355 355 This assumes that ``tree`` is already examined by ``isvalidfunc``.
356 356 """
357 357 return getsymbol(tree[1])
358 358
359 359 def getfuncargs(tree):
360 360 """Get list of function arguments from valid ``func`` in ``tree``
361 361
362 362 This assumes that ``tree`` is already examined by ``isvalidfunc``.
363 363 """
364 364 if len(tree) > 2:
365 365 return getlist(tree[2])
366 366 else:
367 367 return []
368 368
369 369 def getset(repo, subset, x):
370 370 if not x:
371 371 raise error.ParseError(_("missing argument"))
372 372 s = methods[x[0]](repo, subset, *x[1:])
373 373 if util.safehasattr(s, 'isascending'):
374 374 return s
375 375 if (repo.ui.configbool('devel', 'all-warnings')
376 376 or repo.ui.configbool('devel', 'old-revset')):
377 377 # else case should not happen, because all non-func are internal,
378 378 # ignoring for now.
379 379 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
380 380 repo.ui.develwarn('revset "%s" use list instead of smartset, '
381 381 '(upgrade your code)' % x[1][1])
382 382 return baseset(s)
383 383
384 384 def _getrevsource(repo, r):
385 385 extra = repo[r].extra()
386 386 for label in ('source', 'transplant_source', 'rebase_source'):
387 387 if label in extra:
388 388 try:
389 389 return repo[extra[label]].rev()
390 390 except error.RepoLookupError:
391 391 pass
392 392 return None
393 393
394 394 # operator methods
395 395
396 396 def stringset(repo, subset, x):
397 397 x = repo[x].rev()
398 398 if (x in subset
399 399 or x == node.nullrev and isinstance(subset, fullreposet)):
400 400 return baseset([x])
401 401 return baseset()
402 402
403 403 def rangeset(repo, subset, x, y):
404 404 m = getset(repo, fullreposet(repo), x)
405 405 n = getset(repo, fullreposet(repo), y)
406 406
407 407 if not m or not n:
408 408 return baseset()
409 409 m, n = m.first(), n.last()
410 410
411 411 if m == n:
412 412 r = baseset([m])
413 413 elif n == node.wdirrev:
414 414 r = spanset(repo, m, len(repo)) + baseset([n])
415 415 elif m == node.wdirrev:
416 416 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
417 417 elif m < n:
418 418 r = spanset(repo, m, n + 1)
419 419 else:
420 420 r = spanset(repo, m, n - 1)
421 421 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
422 422 # necessary to ensure we preserve the order in subset.
423 423 #
424 424 # This has performance implication, carrying the sorting over when possible
425 425 # would be more efficient.
426 426 return r & subset
427 427
428 428 def dagrange(repo, subset, x, y):
429 429 r = fullreposet(repo)
430 430 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
431 431 includepath=True)
432 432 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
433 433 # necessary to ensure we preserve the order in subset.
434 434 return xs & subset
435 435
436 436 def andset(repo, subset, x, y):
437 437 return getset(repo, getset(repo, subset, x), y)
438 438
439 439 def orset(repo, subset, *xs):
440 440 assert xs
441 441 if len(xs) == 1:
442 442 return getset(repo, subset, xs[0])
443 443 p = len(xs) // 2
444 444 a = orset(repo, subset, *xs[:p])
445 445 b = orset(repo, subset, *xs[p:])
446 446 return a + b
447 447
448 448 def notset(repo, subset, x):
449 449 return subset - getset(repo, subset, x)
450 450
451 451 def listset(repo, subset, a, b):
452 452 raise error.ParseError(_("can't use a list in this context"),
453 453 hint=_('see hg help "revsets.x or y"'))
454 454
455 455 def keyvaluepair(repo, subset, k, v):
456 456 raise error.ParseError(_("can't use a key-value pair in this context"))
457 457
458 458 def func(repo, subset, a, b):
459 459 if a[0] == 'symbol' and a[1] in symbols:
460 460 return symbols[a[1]](repo, subset, b)
461 461
462 462 keep = lambda fn: getattr(fn, '__doc__', None) is not None
463 463
464 464 syms = [s for (s, fn) in symbols.items() if keep(fn)]
465 465 raise error.UnknownIdentifier(a[1], syms)
466 466
467 467 # functions
468 468
469 469 # symbols are callables like:
470 470 # fn(repo, subset, x)
471 471 # with:
472 472 # repo - current repository instance
473 473 # subset - of revisions to be examined
474 474 # x - argument in tree form
475 475 symbols = {}
476 476
477 477 class predicate(registrar.funcregistrar):
478 478 """Decorator to register revset predicate
479 479
480 480 Usage::
481 481
482 482 @predicate('mypredicate(arg1, arg2[, arg3])')
483 483 def mypredicatefunc(repo, subset, x):
484 484 '''Explanation of this revset predicate ....
485 485 '''
486 486 pass
487 487
488 488 The first string argument of the constructor is used also in
489 489 online help.
490
491 Use 'extpredicate' instead of this to register revset predicate in
492 extensions.
490 493 """
491 494 table = symbols
492 495 formatdoc = "``%s``\n %s"
493 496 getname = registrar.funcregistrar.parsefuncdecl
494 497
class extpredicate(registrar.delayregistrar):
    """Decorator to register revset predicate in extensions

    Usage::

        revsetpredicate = revset.extpredicate()

        @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
        def mypredicatefunc(repo, subset, x):
            '''Explanation of this revset predicate ....
            '''
            pass

        def uisetup(ui):
            revsetpredicate.setup()

    'revsetpredicate' instance above can be used to decorate multiple
    functions, and 'setup()' on it registers all such functions at
    once.
    """
    # actual registration is delegated to 'predicate' when setup() runs
    registrar = predicate
519
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # pass the real function name so parse-error messages name this
    # predicate (was 'limit', a copy-paste leftover)
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
500 525
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # error message used to say "_mergedefaultdest", the predicate's old name
    getargs(x, 0, 0, _("_destmerge takes no arguments"))
    return subset & baseset([destutil.destmerge(repo)])
506 531
@predicate('adds(pattern)')
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # field 1 of the repo.status() tuple holds the added files
    return checkstatus(repo, subset, pattern, 1)
518 543
@predicate('ancestor(*changeset)')
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    allrevs = fullreposet(repo)
    common = None

    # fold each revision of each argument into the running GCA
    for arg in args:
        for r in getset(repo, allrevs, arg):
            ctx = repo[r]
            common = ctx if common is None else common.ancestor(ctx)

    if common is not None and common.rev() in subset:
        return baseset([common.rev()])
    return baseset()
543 568
def _ancestors(repo, subset, x, followfirst=False):
    """Helper shared by ancestors() and _firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancestorrevs = _revancestors(repo, heads, followfirst)
    return subset & ancestorrevs
550 575
@predicate('ancestors(set)')
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    # shared helper; follows both parents
    return _ancestors(repo, subset, x)
556 581
@predicate('_firstancestors')
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # No docstring on purpose: func() only advertises documented predicates.
    return _ancestors(repo, subset, x, followfirst=True)
562 587
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        steps = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    parentrevs = repo.changelog.parentrevs
    found = set()
    for rev in getset(repo, fullreposet(repo), x):
        # walk 'steps' first-parent links up from each revision
        for _step in range(steps):
            rev = parentrevs(rev)[0]
        found.add(rev)
    return subset & found
579 604
@predicate('author(string)')
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def matches(r):
        return matcher(encoding.lower(repo[r].user()))

    return subset.filter(matches)
588 613
@predicate('bisect(string)')
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, status))
605 630
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected')
def bisected(repo, subset, x):
    # old spelling of bisect(); delegates unchanged
    return bisect(repo, subset, x)
611 636
@predicate('bookmark([name])')
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. 're:'): collect every matching bookmark;
            # no match at all is also an error
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
649 674
@predicate('branch(string or set)')
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument was (or is treated as) a revset: match the branches of
    # the revisions it evaluates to, plus those revisions themselves
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
686 711
@predicate('bumped()')
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
697 722
@predicate('bundle()')
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundlerepo's changelog carries 'bundlerevs'
    bundlerevs = getattr(repo.changelog, 'bundlerevs', None)
    if bundlerevs is None:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
709 734
def checkstatus(repo, subset, pat, field):
    # Filter 'subset' to changesets whose repo.status() tuple, indexed by
    # 'field' (e.g. 1 == added, as passed by adds()), contains a file
    # matching 'pat'.
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list so the closure can rebind the cached matcher
    mcache = [None]
    def matches(x):
        c = repo[x]
        # fileset patterns depend on the changectx, so rebuild each time
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # literal single-file pattern: plain membership tests suffice
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no changed file matches: skip the expensive status call
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
741 766
def _children(repo, narrow, parentset):
    """Return revisions in 'narrow' having at least one parent in 'parentset'."""
    if not parentset:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    minrev = parentset.min()
    found = set()
    for rev in narrow:
        # a child is always numerically greater than its parents
        if rev <= minrev:
            continue
        if any(p in parentset for p in parentrevs(rev)):
            found.add(rev)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(found)
757 782
@predicate('children(set)')
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parentset = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parentset)
765 790
@predicate('closed()')
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed)
773 798
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    if not matchmod.patkind(pat):
        # plain path: one manifest membership test per revision
        canonical = pathutil.canonpath(repo.root, repo.getcwd(), pat)

        def matches(r):
            return canonical in repo[r]
    else:
        # real pattern: scan the whole manifest of each revision
        def matches(r):
            ctx = repo[r]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
            return any(m(f) for f in ctx.manifest())

    return subset.filter(matches)
800 825
@predicate('converted([id])')
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # 'convert_revision' extra records the source revision of a conversion
        source = repo[r].extra().get('convert_revision', None)
        # prefix match so a truncated identifier still works
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
822 847
@predicate('date(interval)')
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    datematch = util.matchdate(ds)

    def matches(r):
        # date()[0] is the timestamp component
        return datematch(repo[r].date()[0])

    return subset.filter(matches)
831 856
@predicate('desc(string)')
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(r):
        return needle in encoding.lower(repo[r].description())

    return subset.filter(matches)
844 869
def _descendants(repo, subset, x, followfirst=False):
    """Helper shared by descendants() and _firstdescendants()."""
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # subset has no usable ordering; re-intersect to inherit its order
        result = subset & result
    return result
863 888
@predicate('descendants(set)')
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # shared helper; follows both parents
    return _descendants(repo, subset, x)
869 894
@predicate('_firstdescendants')
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # No docstring on purpose: func() only advertises documented predicates.
    return _descendants(repo, subset, x, followfirst=True)
875 900
@predicate('destination([set])')
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # keep walking the source chain backwards
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
919 944
@predicate('divergent()')
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
929 954
@predicate('extinct()')
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
938 963
@predicate('extra(label, [value])')
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # without a value argument, any changeset carrying the label matches
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
968 993
@predicate('filelog(pattern)')
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: single filelog
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: every matching file in the working directory
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {}  # final value for: filerev -> changerev
        lowestchild = {}  # lowest known filerev child of a filerev
        delayed = []  # filerev with filtered linkrev, for post-processing
        lowesthead = None  # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # build the filerev -> head cache lazily, at most once
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr:  # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1083 1108
@predicate('first(set, [n])')
def first(repo, subset, x):
    """An alias for limit().
    """
    # identical argument handling and semantics to limit(set, n)
    return limit(repo, subset, x)
1089 1114
def _follow(repo, subset, x, name, followfirst=False):
    """Helper for follow()/_followfirst(); 'name' appears in error messages."""
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        # NOTE: 'x' is deliberately rebound to the pattern string here
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        s = set()
        for fname in c:
            if matcher(fname):
                fctx = c[fname]
                s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
                # include the revision responsible for the most recent version
                s.add(fctx.introrev())
    else:
        # no pattern: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1109 1134
@predicate('follow([pattern])')
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # 'follow' is only used to name this predicate in error messages
    return _follow(repo, subset, x, 'follow')
1118 1143
@predicate('_followfirst')
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    # No docstring on purpose: func() only advertises documented predicates.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1125 1150
@predicate('all()')
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset(repo) drops "null" if subset contained it
    everything = spanset(repo)
    return subset & everything
1133 1158
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        c = repo[r]
        targets = c.files() + [c.user(), c.description()]
        return any(gr.search(t) for t in targets)

    return subset.filter(matches)
1154 1179
@predicate('_matchfiles')
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the working directory has no changelog entry; go through
            # the workingctx instead
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches)
1220 1245
@predicate('file(pattern)')
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # reuse the generic _matchfiles machinery with a single 'p:' pattern
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1233 1258
@predicate('head()')
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    for branchname, branchheads in repo.branchmap().iteritems():
        headrevs.update(cl.rev(h) for h in branchheads)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1249 1274
@predicate('heads(set)')
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # any member that is a parent of another member is not a head
    innerparents = parents(repo, subset, x)
    return members - innerparents
1257 1282
@predicate('hidden()')
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # everything filtered out of the 'visible' view is hidden
    return subset & repoview.filterrevs(repo, 'visible')
1266 1291
@predicate('keyword(string)')
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        for text in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1281 1306
@predicate('limit(set[, n[, offset]])')
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # skip the first 'ofs' members of the set
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    # then take up to 'lim' members that are also in 'subset'
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1317 1342
@predicate('last(set, [n])')
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    args = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(args) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(args[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    # walk the set from its end and collect up to 'lim' members of subset
    revs = getset(repo, fullreposet(repo), args[0])
    revs.reverse()
    result = []
    it = iter(revs)
    for _n in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1343 1368
@predicate('max(set)')
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    revs = getset(repo, fullreposet(repo), x)
    try:
        m = revs.max()
    except ValueError:
        # revs.max() raises ValueError when the collection is empty,
        # mirroring the behaviour of python's max()
        return baseset()
    if m in subset:
        return baseset([m])
    return baseset()
1358 1383
@predicate('merge()')
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge is any revision whose second parent is set (not -1)
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1367 1392
@predicate('branchpoint()')
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[r - baserev] counts how many revisions above baserev
    # name r as one of their parents, i.e. r's number of children
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1386 1411
@predicate('min(set)')
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    revs = getset(repo, fullreposet(repo), x)
    try:
        m = revs.min()
    except ValueError:
        # revs.min() raises ValueError when the collection is empty,
        # mirroring the behaviour of python's min()
        return baseset()
    if m in subset:
        return baseset([m])
    return baseset()
1401 1426
@predicate('modifies(pattern)')
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # index 0 selects the 'modified' field of the status result
    return checkstatus(repo, subset, pat, 0)
1413 1438
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    # the argument is either a literal namespace name or a regex pattern
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # collect every namespace whose name matches the pattern
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
    if not namespaces:
        raise error.RepoLookupError(_("no namespace exists"
                                      " that match '%s'") % pattern)

    # gather the revisions of every non-deprecated name in the selected
    # namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1451 1476
@predicate('id(string)')
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full-length hex node: look it up directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            pass
    else:
        # prefix: resolve unambiguously via the changelog
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1475 1500
@predicate('obsolete()')
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # delegate to the obsolescence module for the set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1483 1508
@predicate('only(set, [set])')
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # implicit second set: every repo head that is neither in the
        # first set nor descended from it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [r for r in cl.headrevs()
                   if r not in descendants and r not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1509 1534
@predicate('origin([set])')
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # follow the source chain back to the original changeset
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    origins = set(_firstsrc(r) for r in dests)
    # revisions without a recorded source map to None; drop them
    origins.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & origins
1541 1566
@predicate('outgoing([path])')
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # with no explicit path, prefer the configured push path, then the
    # default pull path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence remote chatter while computing the outgoing set
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1568 1593
@predicate('p1([set])')
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: use the working directory's first parent
        rev = repo[x].p1().rev()
        if rev >= 0:
            return subset & baseset([rev])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1587 1612
@predicate('p2([set])')
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            rev = ps[1].rev()
        except IndexError:
            # the working directory has only one parent
            return baseset()
        if rev >= 0:
            return subset & baseset([rev])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1610 1635
@predicate('parents([set])')
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        revs = set(p.rev() for p in repo[x].parents())
    else:
        revs = set()
        parentrevs = repo.changelog.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the changelog has no entry for the working directory,
                # so ask its context for the parents instead
                revs.update(p.rev() for p in repo[r].parents())
            else:
                revs.update(parentrevs(r))
    revs.discard(node.nullrev)
    return subset & revs
1630 1655
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the per-phase revision sets are precomputed
        revs = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        revs = baseset(revs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1643 1668
@predicate('draft()')
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1651 1676
@predicate('secret()')
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1659 1684
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    revs = set()
    parentrevs = repo.changelog.parentrevs
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            revs.add(r)
        elif n == 1:
            revs.add(parentrevs(r)[0])
        else:  # n == 2
            parents = parentrevs(r)
            if len(parents) > 1:
                revs.add(parents[1])
    return subset & revs
1684 1709
@predicate('present(set)')
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # an unknown revision was named: return an empty set instead of
        # aborting the whole query
        return baseset()
1698 1723
# for internal use
@predicate('_notpublic')
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every non-public phase set (index 0 is public)
        revs = set()
        for phaseset in repo._phasecache._phasesets[1:]:
            revs.update(phaseset)
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        target = phases.public
        return subset.filter(lambda r: phase(repo, r) != target, cache=False)
1716 1741
@predicate('public()')
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # no precomputed set exists for the public phase, so always check
    # each revision individually
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1726 1751
@predicate('remote([id [,path]])')
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map it back to a local
    # revision if we have it
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1761 1786
@predicate('removes(pattern)')
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # index 2 selects the 'removed' field of the status result
    return checkstatus(repo, subset, pat, 2)
1773 1798
@predicate('rev(number)')
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is not stored in the changelog
    if r not in repo.changelog and r != node.nullrev:
        return baseset()
    return subset & baseset([r])
1789 1814
@predicate('matching(revision [, field])')
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    # a candidate matches when it agrees with at least one revision in
    # 'revs' on every selected field
    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1901 1926
@predicate('reverse(set)')
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    # smartsets reverse in place
    l.reverse()
    return l
1909 1934
@predicate('roots(set)')
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def isroot(r):
        # a root has no real (non-null) parent inside the set
        return not any(0 <= p and p in s for p in parents(r))
    return subset & s.filter(isroot)
1922 1947
@predicate('sort(set[, [-]key...])')
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map a string to one whose lexicographic order is the reverse,
        # so that an ascending sort on the result gives descending order
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # build one sort key tuple per revision; keys are evaluated in the
    # order given so earlier keys take precedence
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            # exact alternatives, not substring tests: the previous
            # "k in 'user author'" accepted any substring (e.g. 'utho')
            # as a user sort key instead of raising ParseError
            elif k in ('user', 'author'):
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing rev number makes the ordering deterministic on ties
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1985 2010
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state is recorded in .hgsubstate, so only changesets that
    # touch that file can qualify
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names matching the user-supplied pattern
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate appeared: every current subrepo is new
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare the subrepo state between the first parent and this
            # changeset for every matching subrepo path
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate disappeared: every parent subrepo was removed
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
2028 2053
def _substringmatcher(pattern):
    # like util.stringmatcher, except that a 'literal' pattern matches as
    # a substring rather than by exact equality
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return pattern in s
    return kind, pattern, matcher
2034 2059
@predicate('tag([name])')
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no argument: every tagged revision, except the synthetic 'tip'
        revs = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & revs
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        revs = set([repo[tn].rev()])
    else:
        revs = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & revs
2063 2088
@predicate('tagged')
def tagged(repo, subset, x):
    # backwards-compatibility alias of tag(); intentionally left without a
    # docstring so it does not get its own help entry
    return tag(repo, subset, x)
2067 2092
@predicate('unstable()')
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # delegate to the obsolescence module for the set of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2076 2101
2077 2102
@predicate('user(string)')
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' is a plain alias of 'author'
    return author(repo, subset, x)
2087 2112
# experimental
@predicate('wdir')
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # fullreposet is checked explicitly because the working directory
    # pseudo-revision is not enumerated by it
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2096 2121
# for internal use
@predicate('_list')
def _list(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    picked = []
    seen = set()
    for token in spec.split('\0'):
        try:
            # fast path for integer revision
            r = int(token)
            if str(r) != token or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, token)

        for r in revs:
            if r not in seen:
                seen.add(r)
                if (r in subset
                    or r == node.nullrev and isinstance(subset, fullreposet)):
                    picked.append(r)
    return baseset(picked)
2126 2151
# for internal use
@predicate('_intlist')
def _intlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # keep only the listed revisions that appear in the subset
    return baseset([r for r in (int(t) for t in spec.split('\0'))
                    if r in subset])
2136 2161
# for internal use
@predicate('_hexlist')
def _hexlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # map each full hex node to its revision, keeping only those in subset
    torev = repo.changelog.rev
    revs = (torev(node.bin(h)) for h in spec.split('\0'))
    return baseset([r for r in revs if r in subset])
2147 2172
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE(review): the consumers of this set are outside this chunk; keep the
# entries in sync with the predicates defined in this module — verify usage
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2221 2246
# dispatch table: maps each parse tree node type to the function that
# evaluates it (the evaluator functions are defined earlier in this module)
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2237 2262
def optimize(x, small):
    """Rewrite parsed tree ``x`` into an optimized form; return (weight, tree).

    ``weight`` is a heuristic cost estimate used to order 'and' operands
    (the cheaper side is evaluated first).  ``small`` hints that the
    subexpression is expected to yield few revisions, which halves the
    weight of single-revision operands (see ``smallbonus``).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is rewritten as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # note: substring containment test on a space-separated word list;
        # relies on no op value being a substring of another word here
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        if wa > wb:
            # evaluate the cheaper operand first
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial (string/symbol) operands in ss
            # into a single _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses carry no semantics of their own
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # rough per-predicate cost estimates
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2370 2395
_aliasarg = ('func', ('symbol', '_aliasarg'))
def _getaliasarg(tree):
    """Return X when tree is ('func', ('symbol', '_aliasarg'), ('string', X));
    otherwise return None.
    """
    is_placeholder = (len(tree) == 3
                      and tree[:2] == _aliasarg
                      and tree[2][0] == 'string')
    if is_placeholder:
        return tree[2][1]
    return None
2380 2405
def _checkaliasarg(tree, known=None):
    """Recursively verify that ``tree`` contains no _aliasarg construct,
    or only ones whose value is listed in ``known``.  Used to avoid alias
    placeholder injection.
    """
    if not isinstance(tree, tuple):
        return
    value = _getaliasarg(tree)
    if value is not None and (not known or value not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for child in tree:
        _checkaliasarg(child, known)
2391 2416
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (alphanumerics, '._@$', and any byte above 127; '$' is accepted so that
# alias argument placeholders like $1 tokenize as symbols)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2396 2421
def _tokenizealias(program, lookup=None):
    """Tokenize an alias declaration or definition.

    Unlike plain revset tokenization, symbol names may also start with
    ``$`` (kept for backward compatibility); callers should check whether
    ``$`` shows up in unexpected symbols.
    """
    stream = tokenize(program, lookup=lookup,
                      syminitletters=_aliassyminitletters)
    return stream
2406 2431
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        if (pos != len(decl)):
            # trailing garbage after a valid prefix
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                # duplicated argument names would make substitution ambiguous
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2477 2502
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # wrap the alias tokenizer, rewriting argument symbols on the fly
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-symbols that are not declared arguments are errors
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2542 2567
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # parse the declaration (left-hand side); on failure, keep a
        # human-readable error and skip parsing the definition entirely
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        # parse the definition (right-hand side); errors are stored rather
        # than raised so that a broken alias only warns when used/reported
        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2567 2592
def _getalias(aliases, tree):
    """Return the alias object that ``tree`` is an unexpanded reference
    to, or None when ``tree`` is not one.
    """
    if not (isinstance(tree, tuple) and tree):
        return None
    # bare symbol referring to a no-argument alias
    if tree[0] == 'symbol' and len(tree) == 2:
        candidate = aliases.get(tree[1])
        if candidate and candidate.args is None and candidate.tree == tree:
            return candidate
    # function call referring to an alias with arguments
    if tree[0] == 'func' and len(tree) > 1:
        header = tree[1]
        if header[0] == 'symbol' and len(header) == 2:
            candidate = aliases.get(header[1])
            if (candidate and candidate.args is not None
                and candidate.tree == tree[:2]):
                return candidate
    return None
2585 2610
def _expandargs(tree, args):
    """Recursively substitute each _aliasarg placeholder in ``tree`` with
    the value registered under the same name in ``args``.
    """
    if not tree or not isinstance(tree, tuple):
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(child, args) for child in tree)
    return args[name]
2596 2621
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded, used to
    detect cyclic definitions; 'cache' maps alias names to their already
    expanded replacement trees.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise error.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # expand each actual argument (with a fresh expansion stack),
            # then substitute them into the cached replacement tree
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %d') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2630 2655
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases in ``tree``.

    Aliases are read from the [revsetalias] config section.  When
    ``showwarning`` is given, broken aliases (even ones never referenced)
    are reported through it, at most once each.
    """
    _checkaliasarg(tree)
    byname = {}
    for key, value in ui.configitems('revsetalias'):
        entry = revsetalias(key, value)
        byname[entry.name] = entry
    expanded = _expandaliases(byname, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, entry in sorted(byname.iteritems()):
            if entry.error and not entry.warned:
                showwarning(_('warning: %s\n') % (entry.error))
                entry.warned = True
    return expanded
2645 2670
def foldconcat(tree):
    """Collapse `##` concatenation nodes into single string nodes."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(child) for child in tree)
    # iterative depth-first walk over nested _concat nodes, collecting the
    # concatenated pieces in source order
    stack = [tree]
    pieces = []
    while stack:
        node = stack.pop()
        if node[0] == '_concat':
            stack.extend(reversed(node[1:]))
        elif node[0] in ('string', 'symbol'):
            pieces.append(node[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (node[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2666 2691
def parse(spec, lookup=None):
    """Parse revset expression ``spec`` into a tree with chained 'or'
    nodes flattened; raise ParseError unless the whole string is consumed.
    """
    revparser = parser.parser(elements)
    tree, consumed = revparser.parse(tokenize(spec, lookup=lookup))
    if consumed != len(spec):
        raise error.ParseError(_("invalid token"), consumed)
    return parser.simplifyinfixops(tree, ('or',))
2673 2698
def posttreebuilthook(tree, repo):
    """Hook point called with the optimized tree before a matcher is built
    (see _makematcher).  Deliberately a no-op here; intended for extensions
    to wrap or replace."""
    # hook for extensions to execute code on the optimized tree
    pass
2677 2702
def match(ui, spec, repo=None):
    """Build a matcher function for the single revset ``spec``."""
    if not spec:
        raise error.ParseError(_("empty query"))
    # a repo lets the tokenizer resolve ambiguous symbols via membership
    lookup = repo.__contains__ if repo else None
    tree = parse(spec, lookup)
    return _makematcher(ui, tree, repo)
2686 2711
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: match nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # union of all specs, expressed as one big 'or' node
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2704 2729
def _makematcher(ui, tree, repo):
    """Turn a parsed tree into a callable matcher: mfunc(repo, subset)."""
    # apply configured aliases, fold '##' concatenation, then optimize
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            # duck-typed check: subset is already a smartset, use it as-is
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2720 2745
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields an escaped, quoted literal
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument; known element types become a single
        # _list()/_intlist()/_hexlist() call
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # other types: split recursively into a balanced tree of 'or's
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    # scan expr left to right, copying literal text and expanding each
    # %-directive with the next positional argument
    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2814 2839
def prettyformat(tree):
    """Render a parse tree as an indented multi-line string for debugging;
    'string' and 'symbol' leaf nodes are kept on a single line."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2817 2842
def depth(tree):
    """Return the nesting depth of a parse tree: 0 for a leaf value,
    one more than the deepest child for a tuple node."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2823 2848
def funcsused(tree):
    """Return the set of function names referenced anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    # union of the names used by every child subtree
    used = set().union(*(funcsused(subtree) for subtree in tree[1:]))
    if tree[0] == 'func':
        used.add(tree[1][1])
    return used
2834 2859
class abstractsmartset(object):
    """Abstract base class for smartsets: declares the required interface
    and provides the generic set-combination operators (&, +, -, filter)."""

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # the first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # the first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repository changes nothing
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2941 2966
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        """``data`` is any iterable of revision numbers; a set is also kept
        as the membership-test set so it is not rebuilt later."""
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
            # bug fix: always coerce to a real list. reverse() calls
            # self._list.reverse() and _asclist slices then sorts in place,
            # so keeping a tuple (including the default ()) or a generator
            # here would raise AttributeError later.
            data = list(data)
        self._list = data
        # None: unsorted (insertion order); True: ascending; False: descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily computed ascending copy, shared by both sorted directions
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bound method of the underlying set, cached on first use
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3038 3063
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # generator applying the membership condition over any iterator
        # drawn from the subset
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # available only when the underlying subset has a fast ascending
        # iterator to wrap
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # same as fastasc, for descending iteration
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # use the cheapest available iteration and stop at the first match
        fast = self.fastasc
        if fast is None:
            fast = self.fastdesc
        if fast is not None:
            it = fast()
        else:
            it = self

        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            # the last of an ascending set is the first of its descending
            # iteration, and vice versa below
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reversed iterator: exhaust the set, keep the last seen
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3131 3156
3132 3157 def _iterordered(ascending, iter1, iter2):
3133 3158 """produce an ordered iteration from two iterators with the same order
3134 3159
3135 3160 The ascending is used to indicated the iteration direction.
3136 3161 """
3137 3162 choice = max
3138 3163 if ascending:
3139 3164 choice = min
3140 3165
3141 3166 val1 = None
3142 3167 val2 = None
3143 3168 try:
3144 3169 # Consume both iterators in an ordered way until one is empty
3145 3170 while True:
3146 3171 if val1 is None:
3147 3172 val1 = iter1.next()
3148 3173 if val2 is None:
3149 3174 val2 = iter2.next()
3150 3175 next = choice(val1, val2)
3151 3176 yield next
3152 3177 if val1 == next:
3153 3178 val1 = None
3154 3179 if val2 == next:
3155 3180 val2 = None
3156 3181 except StopIteration:
3157 3182 # Flush any remaining values and consume the other one
3158 3183 it = iter2
3159 3184 if val1 is not None:
3160 3185 yield val1
3161 3186 it = iter1
3162 3187 elif val2 is not None:
3163 3188 # might have been equality and both are empty
3164 3189 yield val2
3165 3190 for val in it:
3166 3191 yield val
3167 3192
3168 3193 class addset(abstractsmartset):
3169 3194 """Represent the addition of two sets
3170 3195
3171 3196 Wrapper structure for lazily adding two structures without losing much
3172 3197 performance on the __contains__ method
3173 3198
3174 3199 If the ascending attribute is set, that means the two structures are
3175 3200 ordered in either an ascending or descending way. Therefore, we can add
3176 3201 them maintaining the order by iterating over both at the same time
3177 3202
3178 3203 >>> xs = baseset([0, 3, 2])
3179 3204 >>> ys = baseset([5, 2, 4])
3180 3205
3181 3206 >>> rs = addset(xs, ys)
3182 3207 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3183 3208 (True, True, False, True, 0, 4)
3184 3209 >>> rs = addset(xs, baseset([]))
3185 3210 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3186 3211 (True, True, False, 0, 2)
3187 3212 >>> rs = addset(baseset([]), baseset([]))
3188 3213 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3189 3214 (False, False, None, None)
3190 3215
3191 3216 iterate unsorted:
3192 3217 >>> rs = addset(xs, ys)
3193 3218 >>> [x for x in rs] # without _genlist
3194 3219 [0, 3, 2, 5, 4]
3195 3220 >>> assert not rs._genlist
3196 3221 >>> len(rs)
3197 3222 5
3198 3223 >>> [x for x in rs] # with _genlist
3199 3224 [0, 3, 2, 5, 4]
3200 3225 >>> assert rs._genlist
3201 3226
3202 3227 iterate ascending:
3203 3228 >>> rs = addset(xs, ys, ascending=True)
3204 3229 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3205 3230 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3206 3231 >>> assert not rs._asclist
3207 3232 >>> len(rs)
3208 3233 5
3209 3234 >>> [x for x in rs], [x for x in rs.fastasc()]
3210 3235 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3211 3236 >>> assert rs._asclist
3212 3237
3213 3238 iterate descending:
3214 3239 >>> rs = addset(xs, ys, ascending=False)
3215 3240 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3216 3241 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3217 3242 >>> assert not rs._asclist
3218 3243 >>> len(rs)
3219 3244 5
3220 3245 >>> [x for x in rs], [x for x in rs.fastdesc()]
3221 3246 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3222 3247 >>> assert rs._asclist
3223 3248
3224 3249 iterate ascending without fastasc:
3225 3250 >>> rs = addset(xs, generatorset(ys), ascending=True)
3226 3251 >>> assert rs.fastasc is None
3227 3252 >>> [x for x in rs]
3228 3253 [0, 2, 3, 4, 5]
3229 3254
3230 3255 iterate descending without fastdesc:
3231 3256 >>> rs = addset(generatorset(xs), ys, ascending=False)
3232 3257 >>> assert rs.fastdesc is None
3233 3258 >>> [x for x in rs]
3234 3259 [5, 4, 3, 2, 0]
3235 3260 """
3236 3261 def __init__(self, revs1, revs2, ascending=None):
3237 3262 self._r1 = revs1
3238 3263 self._r2 = revs2
3239 3264 self._iter = None
3240 3265 self._ascending = ascending
3241 3266 self._genlist = None
3242 3267 self._asclist = None
3243 3268
3244 3269 def __len__(self):
3245 3270 return len(self._list)
3246 3271
3247 3272 def __nonzero__(self):
3248 3273 return bool(self._r1) or bool(self._r2)
3249 3274
3250 3275 @util.propertycache
3251 3276 def _list(self):
3252 3277 if not self._genlist:
3253 3278 self._genlist = baseset(iter(self))
3254 3279 return self._genlist
3255 3280
3256 3281 def __iter__(self):
3257 3282 """Iterate over both collections without repeating elements
3258 3283
3259 3284 If the ascending attribute is not set, iterate over the first one and
3260 3285 then over the second one checking for membership on the first one so we
3261 3286 dont yield any duplicates.
3262 3287
3263 3288 If the ascending attribute is set, iterate over both collections at the
3264 3289 same time, yielding only one value at a time in the given order.
3265 3290 """
3266 3291 if self._ascending is None:
3267 3292 if self._genlist:
3268 3293 return iter(self._genlist)
3269 3294 def arbitraryordergen():
3270 3295 for r in self._r1:
3271 3296 yield r
3272 3297 inr1 = self._r1.__contains__
3273 3298 for r in self._r2:
3274 3299 if not inr1(r):
3275 3300 yield r
3276 3301 return arbitraryordergen()
3277 3302 # try to use our own fast iterator if it exists
3278 3303 self._trysetasclist()
3279 3304 if self._ascending:
3280 3305 attr = 'fastasc'
3281 3306 else:
3282 3307 attr = 'fastdesc'
3283 3308 it = getattr(self, attr)
3284 3309 if it is not None:
3285 3310 return it()
3286 3311 # maybe half of the component supports fast
3287 3312 # get iterator for _r1
3288 3313 iter1 = getattr(self._r1, attr)
3289 3314 if iter1 is None:
3290 3315 # let's avoid side effect (not sure it matters)
3291 3316 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3292 3317 else:
3293 3318 iter1 = iter1()
3294 3319 # get iterator for _r2
3295 3320 iter2 = getattr(self._r2, attr)
3296 3321 if iter2 is None:
3297 3322 # let's avoid side effect (not sure it matters)
3298 3323 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3299 3324 else:
3300 3325 iter2 = iter2()
3301 3326 return _iterordered(self._ascending, iter1, iter2)
3302 3327
3303 3328 def _trysetasclist(self):
3304 3329 """populate the _asclist attribute if possible and necessary"""
3305 3330 if self._genlist is not None and self._asclist is None:
3306 3331 self._asclist = sorted(self._genlist)
3307 3332
3308 3333 @property
3309 3334 def fastasc(self):
3310 3335 self._trysetasclist()
3311 3336 if self._asclist is not None:
3312 3337 return self._asclist.__iter__
3313 3338 iter1 = self._r1.fastasc
3314 3339 iter2 = self._r2.fastasc
3315 3340 if None in (iter1, iter2):
3316 3341 return None
3317 3342 return lambda: _iterordered(True, iter1(), iter2())
3318 3343
3319 3344 @property
3320 3345 def fastdesc(self):
3321 3346 self._trysetasclist()
3322 3347 if self._asclist is not None:
3323 3348 return self._asclist.__reversed__
3324 3349 iter1 = self._r1.fastdesc
3325 3350 iter2 = self._r2.fastdesc
3326 3351 if None in (iter1, iter2):
3327 3352 return None
3328 3353 return lambda: _iterordered(False, iter1(), iter2())
3329 3354
3330 3355 def __contains__(self, x):
3331 3356 return x in self._r1 or x in self._r2
3332 3357
3333 3358 def sort(self, reverse=False):
3334 3359 """Sort the added set
3335 3360
3336 3361 For this we use the cached list with all the generated values and if we
3337 3362 know they are ascending or descending we can sort them in a smart way.
3338 3363 """
3339 3364 self._ascending = not reverse
3340 3365
3341 3366 def isascending(self):
3342 3367 return self._ascending is not None and self._ascending
3343 3368
3344 3369 def isdescending(self):
3345 3370 return self._ascending is not None and not self._ascending
3346 3371
3347 3372 def reverse(self):
3348 3373 if self._ascending is None:
3349 3374 self._list.reverse()
3350 3375 else:
3351 3376 self._ascending = not self._ascending
3352 3377
3353 3378 def first(self):
3354 3379 for x in self:
3355 3380 return x
3356 3381 return None
3357 3382
3358 3383 def last(self):
3359 3384 self.reverse()
3360 3385 val = self.first()
3361 3386 self.reverse()
3362 3387 return val
3363 3388
3364 3389 def __repr__(self):
3365 3390 d = {None: '', False: '-', True: '+'}[self._ascending]
3366 3391 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3367 3392
3368 3393 class generatorset(abstractsmartset):
3369 3394 """Wrap a generator for lazy iteration
3370 3395
3371 3396 Wrapper structure for generators that provides lazy membership and can
3372 3397 be iterated more than once.
3373 3398 When asked for membership it generates values until either it finds the
3374 3399 requested one or has gone through all the elements in the generator
3375 3400 """
3376 3401 def __init__(self, gen, iterasc=None):
3377 3402 """
3378 3403 gen: a generator producing the values for the generatorset.
3379 3404 """
3380 3405 self._gen = gen
3381 3406 self._asclist = None
3382 3407 self._cache = {}
3383 3408 self._genlist = []
3384 3409 self._finished = False
3385 3410 self._ascending = True
3386 3411 if iterasc is not None:
3387 3412 if iterasc:
3388 3413 self.fastasc = self._iterator
3389 3414 self.__contains__ = self._asccontains
3390 3415 else:
3391 3416 self.fastdesc = self._iterator
3392 3417 self.__contains__ = self._desccontains
3393 3418
3394 3419 def __nonzero__(self):
3395 3420 # Do not use 'for r in self' because it will enforce the iteration
3396 3421 # order (default ascending), possibly unrolling a whole descending
3397 3422 # iterator.
3398 3423 if self._genlist:
3399 3424 return True
3400 3425 for r in self._consumegen():
3401 3426 return True
3402 3427 return False
3403 3428
3404 3429 def __contains__(self, x):
3405 3430 if x in self._cache:
3406 3431 return self._cache[x]
3407 3432
3408 3433 # Use new values only, as existing values would be cached.
3409 3434 for l in self._consumegen():
3410 3435 if l == x:
3411 3436 return True
3412 3437
3413 3438 self._cache[x] = False
3414 3439 return False
3415 3440
3416 3441 def _asccontains(self, x):
3417 3442 """version of contains optimised for ascending generator"""
3418 3443 if x in self._cache:
3419 3444 return self._cache[x]
3420 3445
3421 3446 # Use new values only, as existing values would be cached.
3422 3447 for l in self._consumegen():
3423 3448 if l == x:
3424 3449 return True
3425 3450 if l > x:
3426 3451 break
3427 3452
3428 3453 self._cache[x] = False
3429 3454 return False
3430 3455
3431 3456 def _desccontains(self, x):
3432 3457 """version of contains optimised for descending generator"""
3433 3458 if x in self._cache:
3434 3459 return self._cache[x]
3435 3460
3436 3461 # Use new values only, as existing values would be cached.
3437 3462 for l in self._consumegen():
3438 3463 if l == x:
3439 3464 return True
3440 3465 if l < x:
3441 3466 break
3442 3467
3443 3468 self._cache[x] = False
3444 3469 return False
3445 3470
3446 3471 def __iter__(self):
3447 3472 if self._ascending:
3448 3473 it = self.fastasc
3449 3474 else:
3450 3475 it = self.fastdesc
3451 3476 if it is not None:
3452 3477 return it()
3453 3478 # we need to consume the iterator
3454 3479 for x in self._consumegen():
3455 3480 pass
3456 3481 # recall the same code
3457 3482 return iter(self)
3458 3483
3459 3484 def _iterator(self):
3460 3485 if self._finished:
3461 3486 return iter(self._genlist)
3462 3487
3463 3488 # We have to use this complex iteration strategy to allow multiple
3464 3489 # iterations at the same time. We need to be able to catch revision
3465 3490 # removed from _consumegen and added to genlist in another instance.
3466 3491 #
3467 3492 # Getting rid of it would provide an about 15% speed up on this
3468 3493 # iteration.
3469 3494 genlist = self._genlist
3470 3495 nextrev = self._consumegen().next
3471 3496 _len = len # cache global lookup
3472 3497 def gen():
3473 3498 i = 0
3474 3499 while True:
3475 3500 if i < _len(genlist):
3476 3501 yield genlist[i]
3477 3502 else:
3478 3503 yield nextrev()
3479 3504 i += 1
3480 3505 return gen()
3481 3506
3482 3507 def _consumegen(self):
3483 3508 cache = self._cache
3484 3509 genlist = self._genlist.append
3485 3510 for item in self._gen:
3486 3511 cache[item] = True
3487 3512 genlist(item)
3488 3513 yield item
3489 3514 if not self._finished:
3490 3515 self._finished = True
3491 3516 asc = self._genlist[:]
3492 3517 asc.sort()
3493 3518 self._asclist = asc
3494 3519 self.fastasc = asc.__iter__
3495 3520 self.fastdesc = asc.__reversed__
3496 3521
3497 3522 def __len__(self):
3498 3523 for x in self._consumegen():
3499 3524 pass
3500 3525 return len(self._genlist)
3501 3526
3502 3527 def sort(self, reverse=False):
3503 3528 self._ascending = not reverse
3504 3529
3505 3530 def reverse(self):
3506 3531 self._ascending = not self._ascending
3507 3532
3508 3533 def isascending(self):
3509 3534 return self._ascending
3510 3535
3511 3536 def isdescending(self):
3512 3537 return not self._ascending
3513 3538
3514 3539 def first(self):
3515 3540 if self._ascending:
3516 3541 it = self.fastasc
3517 3542 else:
3518 3543 it = self.fastdesc
3519 3544 if it is None:
3520 3545 # we need to consume all and try again
3521 3546 for x in self._consumegen():
3522 3547 pass
3523 3548 return self.first()
3524 3549 return next(it(), None)
3525 3550
3526 3551 def last(self):
3527 3552 if self._ascending:
3528 3553 it = self.fastdesc
3529 3554 else:
3530 3555 it = self.fastasc
3531 3556 if it is None:
3532 3557 # we need to consume all and try again
3533 3558 for x in self._consumegen():
3534 3559 pass
3535 3560 return self.first()
3536 3561 return next(it(), None)
3537 3562
3538 3563 def __repr__(self):
3539 3564 d = {False: '-', True: '+'}[self._ascending]
3540 3565 return '<%s%s>' % (type(self).__name__, d)
3541 3566
3542 3567 class spanset(abstractsmartset):
3543 3568 """Duck type for baseset class which represents a range of revisions and
3544 3569 can work lazily and without having all the range in memory
3545 3570
3546 3571 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3547 3572 notable points:
3548 3573 - when x < y it will be automatically descending,
3549 3574 - revision filtered with this repoview will be skipped.
3550 3575
3551 3576 """
3552 3577 def __init__(self, repo, start=0, end=None):
3553 3578 """
3554 3579 start: first revision included the set
3555 3580 (default to 0)
3556 3581 end: first revision excluded (last+1)
3557 3582 (default to len(repo)
3558 3583
3559 3584 Spanset will be descending if `end` < `start`.
3560 3585 """
3561 3586 if end is None:
3562 3587 end = len(repo)
3563 3588 self._ascending = start <= end
3564 3589 if not self._ascending:
3565 3590 start, end = end + 1, start +1
3566 3591 self._start = start
3567 3592 self._end = end
3568 3593 self._hiddenrevs = repo.changelog.filteredrevs
3569 3594
3570 3595 def sort(self, reverse=False):
3571 3596 self._ascending = not reverse
3572 3597
3573 3598 def reverse(self):
3574 3599 self._ascending = not self._ascending
3575 3600
3576 3601 def _iterfilter(self, iterrange):
3577 3602 s = self._hiddenrevs
3578 3603 for r in iterrange:
3579 3604 if r not in s:
3580 3605 yield r
3581 3606
3582 3607 def __iter__(self):
3583 3608 if self._ascending:
3584 3609 return self.fastasc()
3585 3610 else:
3586 3611 return self.fastdesc()
3587 3612
3588 3613 def fastasc(self):
3589 3614 iterrange = xrange(self._start, self._end)
3590 3615 if self._hiddenrevs:
3591 3616 return self._iterfilter(iterrange)
3592 3617 return iter(iterrange)
3593 3618
3594 3619 def fastdesc(self):
3595 3620 iterrange = xrange(self._end - 1, self._start - 1, -1)
3596 3621 if self._hiddenrevs:
3597 3622 return self._iterfilter(iterrange)
3598 3623 return iter(iterrange)
3599 3624
3600 3625 def __contains__(self, rev):
3601 3626 hidden = self._hiddenrevs
3602 3627 return ((self._start <= rev < self._end)
3603 3628 and not (hidden and rev in hidden))
3604 3629
3605 3630 def __nonzero__(self):
3606 3631 for r in self:
3607 3632 return True
3608 3633 return False
3609 3634
3610 3635 def __len__(self):
3611 3636 if not self._hiddenrevs:
3612 3637 return abs(self._end - self._start)
3613 3638 else:
3614 3639 count = 0
3615 3640 start = self._start
3616 3641 end = self._end
3617 3642 for rev in self._hiddenrevs:
3618 3643 if (end < rev <= start) or (start <= rev < end):
3619 3644 count += 1
3620 3645 return abs(self._end - self._start) - count
3621 3646
3622 3647 def isascending(self):
3623 3648 return self._ascending
3624 3649
3625 3650 def isdescending(self):
3626 3651 return not self._ascending
3627 3652
3628 3653 def first(self):
3629 3654 if self._ascending:
3630 3655 it = self.fastasc
3631 3656 else:
3632 3657 it = self.fastdesc
3633 3658 for x in it():
3634 3659 return x
3635 3660 return None
3636 3661
3637 3662 def last(self):
3638 3663 if self._ascending:
3639 3664 it = self.fastdesc
3640 3665 else:
3641 3666 it = self.fastasc
3642 3667 for x in it():
3643 3668 return x
3644 3669 return None
3645 3670
3646 3671 def __repr__(self):
3647 3672 d = {False: '-', True: '+'}[self._ascending]
3648 3673 return '<%s%s %d:%d>' % (type(self).__name__, d,
3649 3674 self._start, self._end - 1)
3650 3675
3651 3676 class fullreposet(spanset):
3652 3677 """a set containing all revisions in the repo
3653 3678
3654 3679 This class exists to host special optimization and magic to handle virtual
3655 3680 revisions such as "null".
3656 3681 """
3657 3682
3658 3683 def __init__(self, repo):
3659 3684 super(fullreposet, self).__init__(repo)
3660 3685
3661 3686 def __and__(self, other):
3662 3687 """As self contains the whole repo, all of the other set should also be
3663 3688 in self. Therefore `self & other = other`.
3664 3689
3665 3690 This boldly assumes the other contains valid revs only.
3666 3691 """
3667 3692 # other not a smartset, make is so
3668 3693 if not util.safehasattr(other, 'isascending'):
3669 3694 # filter out hidden revision
3670 3695 # (this boldly assumes all smartset are pure)
3671 3696 #
3672 3697 # `other` was used with "&", let's assume this is a set like
3673 3698 # object.
3674 3699 other = baseset(other - self._hiddenrevs)
3675 3700
3676 3701 # XXX As fullreposet is also used as bootstrap, this is wrong.
3677 3702 #
3678 3703 # With a giveme312() revset returning [3,1,2], this makes
3679 3704 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3680 3705 # We cannot just drop it because other usage still need to sort it:
3681 3706 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3682 3707 #
3683 3708 # There is also some faulty revset implementations that rely on it
3684 3709 # (eg: children as of its state in e8075329c5fb)
3685 3710 #
3686 3711 # When we fix the two points above we can move this into the if clause
3687 3712 other.sort(reverse=self.isdescending())
3688 3713 return other
3689 3714
3690 3715 def prettyformatset(revs):
3691 3716 lines = []
3692 3717 rs = repr(revs)
3693 3718 p = 0
3694 3719 while p < len(rs):
3695 3720 q = rs.find('<', p + 1)
3696 3721 if q < 0:
3697 3722 q = len(rs)
3698 3723 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3699 3724 assert l >= 0
3700 3725 lines.append((l, rs[p:q].rstrip()))
3701 3726 p = q
3702 3727 return '\n'.join(' ' * l + s for l, s in lines)
3703 3728
3704 3729 # tell hggettext to extract docstrings from these functions:
3705 3730 i18nfunctions = symbols.values()
@@ -1,2191 +1,2231 b''
1 1 $ HGENCODING=utf-8
2 2 $ export HGENCODING
3 3 $ cat > testrevset.py << EOF
4 4 > import mercurial.revset
5 5 >
6 6 > baseset = mercurial.revset.baseset
7 7 >
8 8 > def r3232(repo, subset, x):
9 9 > """"simple revset that return [3,2,3,2]
10 10 >
11 11 > revisions duplicated on purpose.
12 12 > """
13 13 > if 3 not in subset:
14 14 > if 2 in subset:
15 15 > return baseset([2,2])
16 16 > return baseset()
17 17 > return baseset([3,3,2,2])
18 18 >
19 19 > mercurial.revset.symbols['r3232'] = r3232
20 20 > EOF
21 21 $ cat >> $HGRCPATH << EOF
22 22 > [extensions]
23 23 > testrevset=$TESTTMP/testrevset.py
24 24 > EOF
25 25
26 26 $ try() {
27 27 > hg debugrevspec --debug "$@"
28 28 > }
29 29
30 30 $ log() {
31 31 > hg log --template '{rev}\n' -r "$1"
32 32 > }
33 33
34 34 $ hg init repo
35 35 $ cd repo
36 36
37 37 $ echo a > a
38 38 $ hg branch a
39 39 marked working directory as branch a
40 40 (branches are permanent and global, did you want a bookmark?)
41 41 $ hg ci -Aqm0
42 42
43 43 $ echo b > b
44 44 $ hg branch b
45 45 marked working directory as branch b
46 46 $ hg ci -Aqm1
47 47
48 48 $ rm a
49 49 $ hg branch a-b-c-
50 50 marked working directory as branch a-b-c-
51 51 $ hg ci -Aqm2 -u Bob
52 52
53 53 $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n'
54 54 2
55 55 $ hg log -r "extra('branch')" --template '{rev}\n'
56 56 0
57 57 1
58 58 2
59 59 $ hg log -r "extra('branch', 're:a')" --template '{rev} {branch}\n'
60 60 0 a
61 61 2 a-b-c-
62 62
63 63 $ hg co 1
64 64 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
65 65 $ hg branch +a+b+c+
66 66 marked working directory as branch +a+b+c+
67 67 $ hg ci -Aqm3
68 68
69 69 $ hg co 2 # interleave
70 70 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
71 71 $ echo bb > b
72 72 $ hg branch -- -a-b-c-
73 73 marked working directory as branch -a-b-c-
74 74 $ hg ci -Aqm4 -d "May 12 2005"
75 75
76 76 $ hg co 3
77 77 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 78 $ hg branch !a/b/c/
79 79 marked working directory as branch !a/b/c/
80 80 $ hg ci -Aqm"5 bug"
81 81
82 82 $ hg merge 4
83 83 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
84 84 (branch merge, don't forget to commit)
85 85 $ hg branch _a_b_c_
86 86 marked working directory as branch _a_b_c_
87 87 $ hg ci -Aqm"6 issue619"
88 88
89 89 $ hg branch .a.b.c.
90 90 marked working directory as branch .a.b.c.
91 91 $ hg ci -Aqm7
92 92
93 93 $ hg branch all
94 94 marked working directory as branch all
95 95
96 96 $ hg co 4
97 97 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 98 $ hg branch Γ©
99 99 marked working directory as branch \xc3\xa9 (esc)
100 100 $ hg ci -Aqm9
101 101
102 102 $ hg tag -r6 1.0
103 103 $ hg bookmark -r6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
104 104
105 105 $ hg clone --quiet -U -r 7 . ../remote1
106 106 $ hg clone --quiet -U -r 8 . ../remote2
107 107 $ echo "[paths]" >> .hg/hgrc
108 108 $ echo "default = ../remote1" >> .hg/hgrc
109 109
110 110 trivial
111 111
112 112 $ try 0:1
113 113 (range
114 114 ('symbol', '0')
115 115 ('symbol', '1'))
116 116 * set:
117 117 <spanset+ 0:1>
118 118 0
119 119 1
120 120 $ try --optimize :
121 121 (rangeall
122 122 None)
123 123 * optimized:
124 124 (range
125 125 ('string', '0')
126 126 ('string', 'tip'))
127 127 * set:
128 128 <spanset+ 0:9>
129 129 0
130 130 1
131 131 2
132 132 3
133 133 4
134 134 5
135 135 6
136 136 7
137 137 8
138 138 9
139 139 $ try 3::6
140 140 (dagrange
141 141 ('symbol', '3')
142 142 ('symbol', '6'))
143 143 * set:
144 144 <baseset+ [3, 5, 6]>
145 145 3
146 146 5
147 147 6
148 148 $ try '0|1|2'
149 149 (or
150 150 ('symbol', '0')
151 151 ('symbol', '1')
152 152 ('symbol', '2'))
153 153 * set:
154 154 <baseset [0, 1, 2]>
155 155 0
156 156 1
157 157 2
158 158
159 159 names that should work without quoting
160 160
161 161 $ try a
162 162 ('symbol', 'a')
163 163 * set:
164 164 <baseset [0]>
165 165 0
166 166 $ try b-a
167 167 (minus
168 168 ('symbol', 'b')
169 169 ('symbol', 'a'))
170 170 * set:
171 171 <filteredset
172 172 <baseset [1]>>
173 173 1
174 174 $ try _a_b_c_
175 175 ('symbol', '_a_b_c_')
176 176 * set:
177 177 <baseset [6]>
178 178 6
179 179 $ try _a_b_c_-a
180 180 (minus
181 181 ('symbol', '_a_b_c_')
182 182 ('symbol', 'a'))
183 183 * set:
184 184 <filteredset
185 185 <baseset [6]>>
186 186 6
187 187 $ try .a.b.c.
188 188 ('symbol', '.a.b.c.')
189 189 * set:
190 190 <baseset [7]>
191 191 7
192 192 $ try .a.b.c.-a
193 193 (minus
194 194 ('symbol', '.a.b.c.')
195 195 ('symbol', 'a'))
196 196 * set:
197 197 <filteredset
198 198 <baseset [7]>>
199 199 7
200 200
201 201 names that should be caught by fallback mechanism
202 202
203 203 $ try -- '-a-b-c-'
204 204 ('symbol', '-a-b-c-')
205 205 * set:
206 206 <baseset [4]>
207 207 4
208 208 $ log -a-b-c-
209 209 4
210 210 $ try '+a+b+c+'
211 211 ('symbol', '+a+b+c+')
212 212 * set:
213 213 <baseset [3]>
214 214 3
215 215 $ try '+a+b+c+:'
216 216 (rangepost
217 217 ('symbol', '+a+b+c+'))
218 218 * set:
219 219 <spanset+ 3:9>
220 220 3
221 221 4
222 222 5
223 223 6
224 224 7
225 225 8
226 226 9
227 227 $ try ':+a+b+c+'
228 228 (rangepre
229 229 ('symbol', '+a+b+c+'))
230 230 * set:
231 231 <spanset+ 0:3>
232 232 0
233 233 1
234 234 2
235 235 3
236 236 $ try -- '-a-b-c-:+a+b+c+'
237 237 (range
238 238 ('symbol', '-a-b-c-')
239 239 ('symbol', '+a+b+c+'))
240 240 * set:
241 241 <spanset- 3:4>
242 242 4
243 243 3
244 244 $ log '-a-b-c-:+a+b+c+'
245 245 4
246 246 3
247 247
248 248 $ try -- -a-b-c--a # complains
249 249 (minus
250 250 (minus
251 251 (minus
252 252 (negate
253 253 ('symbol', 'a'))
254 254 ('symbol', 'b'))
255 255 ('symbol', 'c'))
256 256 (negate
257 257 ('symbol', 'a')))
258 258 abort: unknown revision '-a'!
259 259 [255]
260 260 $ try Γ©
261 261 ('symbol', '\xc3\xa9')
262 262 * set:
263 263 <baseset [9]>
264 264 9
265 265
266 266 no quoting needed
267 267
268 268 $ log ::a-b-c-
269 269 0
270 270 1
271 271 2
272 272
273 273 quoting needed
274 274
275 275 $ try '"-a-b-c-"-a'
276 276 (minus
277 277 ('string', '-a-b-c-')
278 278 ('symbol', 'a'))
279 279 * set:
280 280 <filteredset
281 281 <baseset [4]>>
282 282 4
283 283
284 284 $ log '1 or 2'
285 285 1
286 286 2
287 287 $ log '1|2'
288 288 1
289 289 2
290 290 $ log '1 and 2'
291 291 $ log '1&2'
292 292 $ try '1&2|3' # precedence - and is higher
293 293 (or
294 294 (and
295 295 ('symbol', '1')
296 296 ('symbol', '2'))
297 297 ('symbol', '3'))
298 298 * set:
299 299 <addset
300 300 <baseset []>,
301 301 <baseset [3]>>
302 302 3
303 303 $ try '1|2&3'
304 304 (or
305 305 ('symbol', '1')
306 306 (and
307 307 ('symbol', '2')
308 308 ('symbol', '3')))
309 309 * set:
310 310 <addset
311 311 <baseset [1]>,
312 312 <baseset []>>
313 313 1
314 314 $ try '1&2&3' # associativity
315 315 (and
316 316 (and
317 317 ('symbol', '1')
318 318 ('symbol', '2'))
319 319 ('symbol', '3'))
320 320 * set:
321 321 <baseset []>
322 322 $ try '1|(2|3)'
323 323 (or
324 324 ('symbol', '1')
325 325 (group
326 326 (or
327 327 ('symbol', '2')
328 328 ('symbol', '3'))))
329 329 * set:
330 330 <addset
331 331 <baseset [1]>,
332 332 <baseset [2, 3]>>
333 333 1
334 334 2
335 335 3
336 336 $ log '1.0' # tag
337 337 6
338 338 $ log 'a' # branch
339 339 0
340 340 $ log '2785f51ee'
341 341 0
342 342 $ log 'date(2005)'
343 343 4
344 344 $ log 'date(this is a test)'
345 345 hg: parse error at 10: unexpected token: symbol
346 346 [255]
347 347 $ log 'date()'
348 348 hg: parse error: date requires a string
349 349 [255]
350 350 $ log 'date'
351 351 abort: unknown revision 'date'!
352 352 [255]
353 353 $ log 'date('
354 354 hg: parse error at 5: not a prefix: end
355 355 [255]
356 356 $ log 'date("\xy")'
357 357 hg: parse error: invalid \x escape
358 358 [255]
359 359 $ log 'date(tip)'
360 360 abort: invalid date: 'tip'
361 361 [255]
362 362 $ log '0:date'
363 363 abort: unknown revision 'date'!
364 364 [255]
365 365 $ log '::"date"'
366 366 abort: unknown revision 'date'!
367 367 [255]
368 368 $ hg book date -r 4
369 369 $ log '0:date'
370 370 0
371 371 1
372 372 2
373 373 3
374 374 4
375 375 $ log '::date'
376 376 0
377 377 1
378 378 2
379 379 4
380 380 $ log '::"date"'
381 381 0
382 382 1
383 383 2
384 384 4
385 385 $ log 'date(2005) and 1::'
386 386 4
387 387 $ hg book -d date
388 388
389 389 keyword arguments
390 390
391 391 $ log 'extra(branch, value=a)'
392 392 0
393 393
394 394 $ log 'extra(branch, a, b)'
395 395 hg: parse error: extra takes at most 2 arguments
396 396 [255]
397 397 $ log 'extra(a, label=b)'
398 398 hg: parse error: extra got multiple values for keyword argument 'label'
399 399 [255]
400 400 $ log 'extra(label=branch, default)'
401 401 hg: parse error: extra got an invalid argument
402 402 [255]
403 403 $ log 'extra(branch, foo+bar=baz)'
404 404 hg: parse error: extra got an invalid argument
405 405 [255]
406 406 $ log 'extra(unknown=branch)'
407 407 hg: parse error: extra got an unexpected keyword argument 'unknown'
408 408 [255]
409 409
410 410 $ try 'foo=bar|baz'
411 411 (keyvalue
412 412 ('symbol', 'foo')
413 413 (or
414 414 ('symbol', 'bar')
415 415 ('symbol', 'baz')))
416 416 hg: parse error: can't use a key-value pair in this context
417 417 [255]
418 418
419 419 Test that symbols only get parsed as functions if there's an opening
420 420 parenthesis.
421 421
422 422 $ hg book only -r 9
423 423 $ log 'only(only)' # Outer "only" is a function, inner "only" is the bookmark
424 424 8
425 425 9
426 426
427 427 ancestor can accept 0 or more arguments
428 428
429 429 $ log 'ancestor()'
430 430 $ log 'ancestor(1)'
431 431 1
432 432 $ log 'ancestor(4,5)'
433 433 1
434 434 $ log 'ancestor(4,5) and 4'
435 435 $ log 'ancestor(0,0,1,3)'
436 436 0
437 437 $ log 'ancestor(3,1,5,3,5,1)'
438 438 1
439 439 $ log 'ancestor(0,1,3,5)'
440 440 0
441 441 $ log 'ancestor(1,2,3,4,5)'
442 442 1
443 443
444 444 test ancestors
445 445
446 446 $ log 'ancestors(5)'
447 447 0
448 448 1
449 449 3
450 450 5
451 451 $ log 'ancestor(ancestors(5))'
452 452 0
453 453 $ log '::r3232()'
454 454 0
455 455 1
456 456 2
457 457 3
458 458
459 459 $ log 'author(bob)'
460 460 2
461 461 $ log 'author("re:bob|test")'
462 462 0
463 463 1
464 464 2
465 465 3
466 466 4
467 467 5
468 468 6
469 469 7
470 470 8
471 471 9
472 472 $ log 'branch(Γ©)'
473 473 8
474 474 9
475 475 $ log 'branch(a)'
476 476 0
477 477 $ hg log -r 'branch("re:a")' --template '{rev} {branch}\n'
478 478 0 a
479 479 2 a-b-c-
480 480 3 +a+b+c+
481 481 4 -a-b-c-
482 482 5 !a/b/c/
483 483 6 _a_b_c_
484 484 7 .a.b.c.
485 485 $ log 'children(ancestor(4,5))'
486 486 2
487 487 3
488 488 $ log 'closed()'
489 489 $ log 'contains(a)'
490 490 0
491 491 1
492 492 3
493 493 5
494 494 $ log 'contains("../repo/a")'
495 495 0
496 496 1
497 497 3
498 498 5
499 499 $ log 'desc(B)'
500 500 5
501 501 $ log 'descendants(2 or 3)'
502 502 2
503 503 3
504 504 4
505 505 5
506 506 6
507 507 7
508 508 8
509 509 9
510 510 $ log 'file("b*")'
511 511 1
512 512 4
513 513 $ log 'filelog("b")'
514 514 1
515 515 4
516 516 $ log 'filelog("../repo/b")'
517 517 1
518 518 4
519 519 $ log 'follow()'
520 520 0
521 521 1
522 522 2
523 523 4
524 524 8
525 525 9
526 526 $ log 'grep("issue\d+")'
527 527 6
528 528 $ try 'grep("(")' # invalid regular expression
529 529 (func
530 530 ('symbol', 'grep')
531 531 ('string', '('))
532 532 hg: parse error: invalid match pattern: unbalanced parenthesis
533 533 [255]
534 534 $ try 'grep("\bissue\d+")'
535 535 (func
536 536 ('symbol', 'grep')
537 537 ('string', '\x08issue\\d+'))
538 538 * set:
539 539 <filteredset
540 540 <fullreposet+ 0:9>>
541 541 $ try 'grep(r"\bissue\d+")'
542 542 (func
543 543 ('symbol', 'grep')
544 544 ('string', '\\bissue\\d+'))
545 545 * set:
546 546 <filteredset
547 547 <fullreposet+ 0:9>>
548 548 6
549 549 $ try 'grep(r"\")'
550 550 hg: parse error at 7: unterminated string
551 551 [255]
552 552 $ log 'head()'
553 553 0
554 554 1
555 555 2
556 556 3
557 557 4
558 558 5
559 559 6
560 560 7
561 561 9
562 562 $ log 'heads(6::)'
563 563 7
564 564 $ log 'keyword(issue)'
565 565 6
566 566 $ log 'keyword("test a")'
567 567 $ log 'limit(head(), 1)'
568 568 0
569 569 $ log 'limit(author("re:bob|test"), 3, 5)'
570 570 5
571 571 6
572 572 7
573 573 $ log 'limit(author("re:bob|test"), offset=6)'
574 574 6
575 575 $ log 'limit(author("re:bob|test"), offset=10)'
576 576 $ log 'limit(all(), 1, -1)'
577 577 hg: parse error: negative offset
578 578 [255]
579 579 $ log 'matching(6)'
580 580 6
581 581 $ log 'matching(6:7, "phase parents user date branch summary files description substate")'
582 582 6
583 583 7
584 584
585 585 Testing min and max
586 586
587 587 max: simple
588 588
589 589 $ log 'max(contains(a))'
590 590 5
591 591
 592 592 max: simple on unordered set
593 593
594 594 $ log 'max((4+0+2+5+7) and contains(a))'
595 595 5
596 596
597 597 max: no result
598 598
599 599 $ log 'max(contains(stringthatdoesnotappearanywhere))'
600 600
601 601 max: no result on unordered set
602 602
603 603 $ log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
604 604
605 605 min: simple
606 606
607 607 $ log 'min(contains(a))'
608 608 0
609 609
610 610 min: simple on unordered set
611 611
612 612 $ log 'min((4+0+2+5+7) and contains(a))'
613 613 0
614 614
615 615 min: empty
616 616
617 617 $ log 'min(contains(stringthatdoesnotappearanywhere))'
618 618
619 619 min: empty on unordered set
620 620
621 621 $ log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
622 622
623 623
624 624 $ log 'merge()'
625 625 6
626 626 $ log 'branchpoint()'
627 627 1
628 628 4
629 629 $ log 'modifies(b)'
630 630 4
631 631 $ log 'modifies("path:b")'
632 632 4
633 633 $ log 'modifies("*")'
634 634 4
635 635 6
636 636 $ log 'modifies("set:modified()")'
637 637 4
638 638 $ log 'id(5)'
639 639 2
640 640 $ log 'only(9)'
641 641 8
642 642 9
643 643 $ log 'only(8)'
644 644 8
645 645 $ log 'only(9, 5)'
646 646 2
647 647 4
648 648 8
649 649 9
650 650 $ log 'only(7 + 9, 5 + 2)'
651 651 4
652 652 6
653 653 7
654 654 8
655 655 9
656 656
657 657 Test empty set input
658 658 $ log 'only(p2())'
659 659 $ log 'only(p1(), p2())'
660 660 0
661 661 1
662 662 2
663 663 4
664 664 8
665 665 9
666 666
667 667 Test '%' operator
668 668
669 669 $ log '9%'
670 670 8
671 671 9
672 672 $ log '9%5'
673 673 2
674 674 4
675 675 8
676 676 9
677 677 $ log '(7 + 9)%(5 + 2)'
678 678 4
679 679 6
680 680 7
681 681 8
682 682 9
683 683
 684 684 Test operand of '%' is optimized recursively (issue4670)
685 685
686 686 $ try --optimize '8:9-8%'
687 687 (onlypost
688 688 (minus
689 689 (range
690 690 ('symbol', '8')
691 691 ('symbol', '9'))
692 692 ('symbol', '8')))
693 693 * optimized:
694 694 (func
695 695 ('symbol', 'only')
696 696 (and
697 697 (range
698 698 ('symbol', '8')
699 699 ('symbol', '9'))
700 700 (not
701 701 ('symbol', '8'))))
702 702 * set:
703 703 <baseset+ [8, 9]>
704 704 8
705 705 9
706 706 $ try --optimize '(9)%(5)'
707 707 (only
708 708 (group
709 709 ('symbol', '9'))
710 710 (group
711 711 ('symbol', '5')))
712 712 * optimized:
713 713 (func
714 714 ('symbol', 'only')
715 715 (list
716 716 ('symbol', '9')
717 717 ('symbol', '5')))
718 718 * set:
719 719 <baseset+ [8, 9, 2, 4]>
720 720 2
721 721 4
722 722 8
723 723 9
724 724
725 725 Test the order of operations
726 726
727 727 $ log '7 + 9%5 + 2'
728 728 7
729 729 2
730 730 4
731 731 8
732 732 9
733 733
734 734 Test explicit numeric revision
735 735 $ log 'rev(-2)'
736 736 $ log 'rev(-1)'
737 737 -1
738 738 $ log 'rev(0)'
739 739 0
740 740 $ log 'rev(9)'
741 741 9
742 742 $ log 'rev(10)'
743 743 $ log 'rev(tip)'
744 744 hg: parse error: rev expects a number
745 745 [255]
746 746
747 747 Test hexadecimal revision
748 748 $ log 'id(2)'
749 749 abort: 00changelog.i@2: ambiguous identifier!
750 750 [255]
751 751 $ log 'id(23268)'
752 752 4
753 753 $ log 'id(2785f51eece)'
754 754 0
755 755 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532c)'
756 756 8
757 757 $ log 'id(d5d0dcbdc4a)'
758 758 $ log 'id(d5d0dcbdc4w)'
759 759 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532d)'
760 760 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532q)'
761 761 $ log 'id(1.0)'
762 762 $ log 'id(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)'
763 763
764 764 Test null revision
765 765 $ log '(null)'
766 766 -1
767 767 $ log '(null:0)'
768 768 -1
769 769 0
770 770 $ log '(0:null)'
771 771 0
772 772 -1
773 773 $ log 'null::0'
774 774 -1
775 775 0
776 776 $ log 'null:tip - 0:'
777 777 -1
778 778 $ log 'null: and null::' | head -1
779 779 -1
780 780 $ log 'null: or 0:' | head -2
781 781 -1
782 782 0
783 783 $ log 'ancestors(null)'
784 784 -1
785 785 $ log 'reverse(null:)' | tail -2
786 786 0
787 787 -1
788 788 BROKEN: should be '-1'
789 789 $ log 'first(null:)'
790 790 BROKEN: should be '-1'
791 791 $ log 'min(null:)'
792 792 $ log 'tip:null and all()' | tail -2
793 793 1
794 794 0
795 795
796 796 Test working-directory revision
797 797 $ hg debugrevspec 'wdir()'
798 798 2147483647
799 799 $ hg debugrevspec 'tip or wdir()'
800 800 9
801 801 2147483647
802 802 $ hg debugrevspec '0:tip and wdir()'
803 803 $ log '0:wdir()' | tail -3
804 804 8
805 805 9
806 806 2147483647
807 807 $ log 'wdir():0' | head -3
808 808 2147483647
809 809 9
810 810 8
811 811 $ log 'wdir():wdir()'
812 812 2147483647
813 813 $ log '(all() + wdir()) & min(. + wdir())'
814 814 9
815 815 $ log '(all() + wdir()) & max(. + wdir())'
816 816 2147483647
817 817 $ log '(all() + wdir()) & first(wdir() + .)'
818 818 2147483647
819 819 $ log '(all() + wdir()) & last(. + wdir())'
820 820 2147483647
821 821
822 822 $ log 'outgoing()'
823 823 8
824 824 9
825 825 $ log 'outgoing("../remote1")'
826 826 8
827 827 9
828 828 $ log 'outgoing("../remote2")'
829 829 3
830 830 5
831 831 6
832 832 7
833 833 9
834 834 $ log 'p1(merge())'
835 835 5
836 836 $ log 'p2(merge())'
837 837 4
838 838 $ log 'parents(merge())'
839 839 4
840 840 5
841 841 $ log 'p1(branchpoint())'
842 842 0
843 843 2
844 844 $ log 'p2(branchpoint())'
845 845 $ log 'parents(branchpoint())'
846 846 0
847 847 2
848 848 $ log 'removes(a)'
849 849 2
850 850 6
851 851 $ log 'roots(all())'
852 852 0
853 853 $ log 'reverse(2 or 3 or 4 or 5)'
854 854 5
855 855 4
856 856 3
857 857 2
858 858 $ log 'reverse(all())'
859 859 9
860 860 8
861 861 7
862 862 6
863 863 5
864 864 4
865 865 3
866 866 2
867 867 1
868 868 0
869 869 $ log 'reverse(all()) & filelog(b)'
870 870 4
871 871 1
872 872 $ log 'rev(5)'
873 873 5
874 874 $ log 'sort(limit(reverse(all()), 3))'
875 875 7
876 876 8
877 877 9
878 878 $ log 'sort(2 or 3 or 4 or 5, date)'
879 879 2
880 880 3
881 881 5
882 882 4
883 883 $ log 'tagged()'
884 884 6
885 885 $ log 'tag()'
886 886 6
887 887 $ log 'tag(1.0)'
888 888 6
889 889 $ log 'tag(tip)'
890 890 9
891 891
892 892 test sort revset
893 893 --------------------------------------------
894 894
895 895 test when adding two unordered revsets
896 896
897 897 $ log 'sort(keyword(issue) or modifies(b))'
898 898 4
899 899 6
900 900
901 901 test when sorting a reversed collection in the same way it is
902 902
903 903 $ log 'sort(reverse(all()), -rev)'
904 904 9
905 905 8
906 906 7
907 907 6
908 908 5
909 909 4
910 910 3
911 911 2
912 912 1
913 913 0
914 914
915 915 test when sorting a reversed collection
916 916
917 917 $ log 'sort(reverse(all()), rev)'
918 918 0
919 919 1
920 920 2
921 921 3
922 922 4
923 923 5
924 924 6
925 925 7
926 926 8
927 927 9
928 928
929 929
930 930 test sorting two sorted collections in different orders
931 931
932 932 $ log 'sort(outgoing() or reverse(removes(a)), rev)'
933 933 2
934 934 6
935 935 8
936 936 9
937 937
938 938 test sorting two sorted collections in different orders backwards
939 939
940 940 $ log 'sort(outgoing() or reverse(removes(a)), -rev)'
941 941 9
942 942 8
943 943 6
944 944 2
945 945
946 946 test subtracting something from an addset
947 947
948 948 $ log '(outgoing() or removes(a)) - removes(a)'
949 949 8
950 950 9
951 951
952 952 test intersecting something with an addset
953 953
954 954 $ log 'parents(outgoing() or removes(a))'
955 955 1
956 956 4
957 957 5
958 958 8
959 959
960 960 test that `or` operation combines elements in the right order:
961 961
962 962 $ log '3:4 or 2:5'
963 963 3
964 964 4
965 965 2
966 966 5
967 967 $ log '3:4 or 5:2'
968 968 3
969 969 4
970 970 5
971 971 2
972 972 $ log 'sort(3:4 or 2:5)'
973 973 2
974 974 3
975 975 4
976 976 5
977 977 $ log 'sort(3:4 or 5:2)'
978 978 2
979 979 3
980 980 4
981 981 5
982 982
983 983 test that more than one `-r`s are combined in the right order and deduplicated:
984 984
985 985 $ hg log -T '{rev}\n' -r 3 -r 3 -r 4 -r 5:2 -r 'ancestors(4)'
986 986 3
987 987 4
988 988 5
989 989 2
990 990 0
991 991 1
992 992
993 993 test that `or` operation skips duplicated revisions from right-hand side
994 994
995 995 $ try 'reverse(1::5) or ancestors(4)'
996 996 (or
997 997 (func
998 998 ('symbol', 'reverse')
999 999 (dagrange
1000 1000 ('symbol', '1')
1001 1001 ('symbol', '5')))
1002 1002 (func
1003 1003 ('symbol', 'ancestors')
1004 1004 ('symbol', '4')))
1005 1005 * set:
1006 1006 <addset
1007 1007 <baseset- [1, 3, 5]>,
1008 1008 <generatorset+>>
1009 1009 5
1010 1010 3
1011 1011 1
1012 1012 0
1013 1013 2
1014 1014 4
1015 1015 $ try 'sort(ancestors(4) or reverse(1::5))'
1016 1016 (func
1017 1017 ('symbol', 'sort')
1018 1018 (or
1019 1019 (func
1020 1020 ('symbol', 'ancestors')
1021 1021 ('symbol', '4'))
1022 1022 (func
1023 1023 ('symbol', 'reverse')
1024 1024 (dagrange
1025 1025 ('symbol', '1')
1026 1026 ('symbol', '5')))))
1027 1027 * set:
1028 1028 <addset+
1029 1029 <generatorset+>,
1030 1030 <baseset- [1, 3, 5]>>
1031 1031 0
1032 1032 1
1033 1033 2
1034 1034 3
1035 1035 4
1036 1036 5
1037 1037
1038 1038 test optimization of trivial `or` operation
1039 1039
1040 1040 $ try --optimize '0|(1)|"2"|-2|tip|null'
1041 1041 (or
1042 1042 ('symbol', '0')
1043 1043 (group
1044 1044 ('symbol', '1'))
1045 1045 ('string', '2')
1046 1046 (negate
1047 1047 ('symbol', '2'))
1048 1048 ('symbol', 'tip')
1049 1049 ('symbol', 'null'))
1050 1050 * optimized:
1051 1051 (func
1052 1052 ('symbol', '_list')
1053 1053 ('string', '0\x001\x002\x00-2\x00tip\x00null'))
1054 1054 * set:
1055 1055 <baseset [0, 1, 2, 8, 9, -1]>
1056 1056 0
1057 1057 1
1058 1058 2
1059 1059 8
1060 1060 9
1061 1061 -1
1062 1062
1063 1063 $ try --optimize '0|1|2:3'
1064 1064 (or
1065 1065 ('symbol', '0')
1066 1066 ('symbol', '1')
1067 1067 (range
1068 1068 ('symbol', '2')
1069 1069 ('symbol', '3')))
1070 1070 * optimized:
1071 1071 (or
1072 1072 (func
1073 1073 ('symbol', '_list')
1074 1074 ('string', '0\x001'))
1075 1075 (range
1076 1076 ('symbol', '2')
1077 1077 ('symbol', '3')))
1078 1078 * set:
1079 1079 <addset
1080 1080 <baseset [0, 1]>,
1081 1081 <spanset+ 2:3>>
1082 1082 0
1083 1083 1
1084 1084 2
1085 1085 3
1086 1086
1087 1087 $ try --optimize '0:1|2|3:4|5|6'
1088 1088 (or
1089 1089 (range
1090 1090 ('symbol', '0')
1091 1091 ('symbol', '1'))
1092 1092 ('symbol', '2')
1093 1093 (range
1094 1094 ('symbol', '3')
1095 1095 ('symbol', '4'))
1096 1096 ('symbol', '5')
1097 1097 ('symbol', '6'))
1098 1098 * optimized:
1099 1099 (or
1100 1100 (range
1101 1101 ('symbol', '0')
1102 1102 ('symbol', '1'))
1103 1103 ('symbol', '2')
1104 1104 (range
1105 1105 ('symbol', '3')
1106 1106 ('symbol', '4'))
1107 1107 (func
1108 1108 ('symbol', '_list')
1109 1109 ('string', '5\x006')))
1110 1110 * set:
1111 1111 <addset
1112 1112 <addset
1113 1113 <spanset+ 0:1>,
1114 1114 <baseset [2]>>,
1115 1115 <addset
1116 1116 <spanset+ 3:4>,
1117 1117 <baseset [5, 6]>>>
1118 1118 0
1119 1119 1
1120 1120 2
1121 1121 3
1122 1122 4
1123 1123 5
1124 1124 6
1125 1125
1126 1126 test that `_list` should be narrowed by provided `subset`
1127 1127
1128 1128 $ log '0:2 and (null|1|2|3)'
1129 1129 1
1130 1130 2
1131 1131
1132 1132 test that `_list` should remove duplicates
1133 1133
1134 1134 $ log '0|1|2|1|2|-1|tip'
1135 1135 0
1136 1136 1
1137 1137 2
1138 1138 9
1139 1139
1140 1140 test unknown revision in `_list`
1141 1141
1142 1142 $ log '0|unknown'
1143 1143 abort: unknown revision 'unknown'!
1144 1144 [255]
1145 1145
1146 1146 test integer range in `_list`
1147 1147
1148 1148 $ log '-1|-10'
1149 1149 9
1150 1150 0
1151 1151
1152 1152 $ log '-10|-11'
1153 1153 abort: unknown revision '-11'!
1154 1154 [255]
1155 1155
1156 1156 $ log '9|10'
1157 1157 abort: unknown revision '10'!
1158 1158 [255]
1159 1159
1160 1160 test '0000' != '0' in `_list`
1161 1161
1162 1162 $ log '0|0000'
1163 1163 0
1164 1164 -1
1165 1165
1166 1166 test ',' in `_list`
1167 1167 $ log '0,1'
1168 1168 hg: parse error: can't use a list in this context
1169 1169 (see hg help "revsets.x or y")
1170 1170 [255]
1171 1171
1172 1172 test that chained `or` operations make balanced addsets
1173 1173
1174 1174 $ try '0:1|1:2|2:3|3:4|4:5'
1175 1175 (or
1176 1176 (range
1177 1177 ('symbol', '0')
1178 1178 ('symbol', '1'))
1179 1179 (range
1180 1180 ('symbol', '1')
1181 1181 ('symbol', '2'))
1182 1182 (range
1183 1183 ('symbol', '2')
1184 1184 ('symbol', '3'))
1185 1185 (range
1186 1186 ('symbol', '3')
1187 1187 ('symbol', '4'))
1188 1188 (range
1189 1189 ('symbol', '4')
1190 1190 ('symbol', '5')))
1191 1191 * set:
1192 1192 <addset
1193 1193 <addset
1194 1194 <spanset+ 0:1>,
1195 1195 <spanset+ 1:2>>,
1196 1196 <addset
1197 1197 <spanset+ 2:3>,
1198 1198 <addset
1199 1199 <spanset+ 3:4>,
1200 1200 <spanset+ 4:5>>>>
1201 1201 0
1202 1202 1
1203 1203 2
1204 1204 3
1205 1205 4
1206 1206 5
1207 1207
1208 1208 no crash by empty group "()" while optimizing `or` operations
1209 1209
1210 1210 $ try --optimize '0|()'
1211 1211 (or
1212 1212 ('symbol', '0')
1213 1213 (group
1214 1214 None))
1215 1215 * optimized:
1216 1216 (or
1217 1217 ('symbol', '0')
1218 1218 None)
1219 1219 hg: parse error: missing argument
1220 1220 [255]
1221 1221
1222 1222 test that chained `or` operations never eat up stack (issue4624)
1223 1223 (uses `0:1` instead of `0` to avoid future optimization of trivial revisions)
1224 1224
1225 1225 $ hg log -T '{rev}\n' -r "`python -c "print '|'.join(['0:1'] * 500)"`"
1226 1226 0
1227 1227 1
1228 1228
1229 1229 test that repeated `-r` options never eat up stack (issue4565)
1230 1230 (uses `-r 0::1` to avoid possible optimization at old-style parser)
1231 1231
1232 1232 $ hg log -T '{rev}\n' `python -c "for i in xrange(500): print '-r 0::1 ',"`
1233 1233 0
1234 1234 1
1235 1235
1236 1236 check that conversion to only works
1237 1237 $ try --optimize '::3 - ::1'
1238 1238 (minus
1239 1239 (dagrangepre
1240 1240 ('symbol', '3'))
1241 1241 (dagrangepre
1242 1242 ('symbol', '1')))
1243 1243 * optimized:
1244 1244 (func
1245 1245 ('symbol', 'only')
1246 1246 (list
1247 1247 ('symbol', '3')
1248 1248 ('symbol', '1')))
1249 1249 * set:
1250 1250 <baseset+ [3]>
1251 1251 3
1252 1252 $ try --optimize 'ancestors(1) - ancestors(3)'
1253 1253 (minus
1254 1254 (func
1255 1255 ('symbol', 'ancestors')
1256 1256 ('symbol', '1'))
1257 1257 (func
1258 1258 ('symbol', 'ancestors')
1259 1259 ('symbol', '3')))
1260 1260 * optimized:
1261 1261 (func
1262 1262 ('symbol', 'only')
1263 1263 (list
1264 1264 ('symbol', '1')
1265 1265 ('symbol', '3')))
1266 1266 * set:
1267 1267 <baseset+ []>
1268 1268 $ try --optimize 'not ::2 and ::6'
1269 1269 (and
1270 1270 (not
1271 1271 (dagrangepre
1272 1272 ('symbol', '2')))
1273 1273 (dagrangepre
1274 1274 ('symbol', '6')))
1275 1275 * optimized:
1276 1276 (func
1277 1277 ('symbol', 'only')
1278 1278 (list
1279 1279 ('symbol', '6')
1280 1280 ('symbol', '2')))
1281 1281 * set:
1282 1282 <baseset+ [3, 4, 5, 6]>
1283 1283 3
1284 1284 4
1285 1285 5
1286 1286 6
1287 1287 $ try --optimize 'ancestors(6) and not ancestors(4)'
1288 1288 (and
1289 1289 (func
1290 1290 ('symbol', 'ancestors')
1291 1291 ('symbol', '6'))
1292 1292 (not
1293 1293 (func
1294 1294 ('symbol', 'ancestors')
1295 1295 ('symbol', '4'))))
1296 1296 * optimized:
1297 1297 (func
1298 1298 ('symbol', 'only')
1299 1299 (list
1300 1300 ('symbol', '6')
1301 1301 ('symbol', '4')))
1302 1302 * set:
1303 1303 <baseset+ [3, 5, 6]>
1304 1304 3
1305 1305 5
1306 1306 6
1307 1307
1308 1308 no crash by empty group "()" while optimizing to "only()"
1309 1309
1310 1310 $ try --optimize '::1 and ()'
1311 1311 (and
1312 1312 (dagrangepre
1313 1313 ('symbol', '1'))
1314 1314 (group
1315 1315 None))
1316 1316 * optimized:
1317 1317 (and
1318 1318 None
1319 1319 (func
1320 1320 ('symbol', 'ancestors')
1321 1321 ('symbol', '1')))
1322 1322 hg: parse error: missing argument
1323 1323 [255]
1324 1324
1325 1325 we can use patterns when searching for tags
1326 1326
1327 1327 $ log 'tag("1..*")'
1328 1328 abort: tag '1..*' does not exist!
1329 1329 [255]
1330 1330 $ log 'tag("re:1..*")'
1331 1331 6
1332 1332 $ log 'tag("re:[0-9].[0-9]")'
1333 1333 6
1334 1334 $ log 'tag("literal:1.0")'
1335 1335 6
1336 1336 $ log 'tag("re:0..*")'
1337 1337
1338 1338 $ log 'tag(unknown)'
1339 1339 abort: tag 'unknown' does not exist!
1340 1340 [255]
1341 1341 $ log 'tag("re:unknown")'
1342 1342 $ log 'present(tag("unknown"))'
1343 1343 $ log 'present(tag("re:unknown"))'
1344 1344 $ log 'branch(unknown)'
1345 1345 abort: unknown revision 'unknown'!
1346 1346 [255]
1347 1347 $ log 'branch("literal:unknown")'
1348 1348 abort: branch 'unknown' does not exist!
1349 1349 [255]
1350 1350 $ log 'branch("re:unknown")'
1351 1351 $ log 'present(branch("unknown"))'
1352 1352 $ log 'present(branch("re:unknown"))'
1353 1353 $ log 'user(bob)'
1354 1354 2
1355 1355
1356 1356 $ log '4::8'
1357 1357 4
1358 1358 8
1359 1359 $ log '4:8'
1360 1360 4
1361 1361 5
1362 1362 6
1363 1363 7
1364 1364 8
1365 1365
1366 1366 $ log 'sort(!merge() & (modifies(b) | user(bob) | keyword(bug) | keyword(issue) & 1::9), "-date")'
1367 1367 4
1368 1368 2
1369 1369 5
1370 1370
1371 1371 $ log 'not 0 and 0:2'
1372 1372 1
1373 1373 2
1374 1374 $ log 'not 1 and 0:2'
1375 1375 0
1376 1376 2
1377 1377 $ log 'not 2 and 0:2'
1378 1378 0
1379 1379 1
1380 1380 $ log '(1 and 2)::'
1381 1381 $ log '(1 and 2):'
1382 1382 $ log '(1 and 2):3'
1383 1383 $ log 'sort(head(), -rev)'
1384 1384 9
1385 1385 7
1386 1386 6
1387 1387 5
1388 1388 4
1389 1389 3
1390 1390 2
1391 1391 1
1392 1392 0
1393 1393 $ log '4::8 - 8'
1394 1394 4
1395 1395 $ log 'matching(1 or 2 or 3) and (2 or 3 or 1)'
1396 1396 2
1397 1397 3
1398 1398 1
1399 1399
1400 1400 $ log 'named("unknown")'
1401 1401 abort: namespace 'unknown' does not exist!
1402 1402 [255]
1403 1403 $ log 'named("re:unknown")'
1404 1404 abort: no namespace exists that match 'unknown'!
1405 1405 [255]
1406 1406 $ log 'present(named("unknown"))'
1407 1407 $ log 'present(named("re:unknown"))'
1408 1408
1409 1409 $ log 'tag()'
1410 1410 6
1411 1411 $ log 'named("tags")'
1412 1412 6
1413 1413
1414 1414 issue2437
1415 1415
1416 1416 $ log '3 and p1(5)'
1417 1417 3
1418 1418 $ log '4 and p2(6)'
1419 1419 4
1420 1420 $ log '1 and parents(:2)'
1421 1421 1
1422 1422 $ log '2 and children(1:)'
1423 1423 2
1424 1424 $ log 'roots(all()) or roots(all())'
1425 1425 0
1426 1426 $ hg debugrevspec 'roots(all()) or roots(all())'
1427 1427 0
1428 1428 $ log 'heads(branch(Γ©)) or heads(branch(Γ©))'
1429 1429 9
1430 1430 $ log 'ancestors(8) and (heads(branch("-a-b-c-")) or heads(branch(Γ©)))'
1431 1431 4
1432 1432
1433 1433 issue2654: report a parse error if the revset was not completely parsed
1434 1434
1435 1435 $ log '1 OR 2'
1436 1436 hg: parse error at 2: invalid token
1437 1437 [255]
1438 1438
1439 1439 or operator should preserve ordering:
1440 1440 $ log 'reverse(2::4) or tip'
1441 1441 4
1442 1442 2
1443 1443 9
1444 1444
1445 1445 parentrevspec
1446 1446
1447 1447 $ log 'merge()^0'
1448 1448 6
1449 1449 $ log 'merge()^'
1450 1450 5
1451 1451 $ log 'merge()^1'
1452 1452 5
1453 1453 $ log 'merge()^2'
1454 1454 4
1455 1455 $ log 'merge()^^'
1456 1456 3
1457 1457 $ log 'merge()^1^'
1458 1458 3
1459 1459 $ log 'merge()^^^'
1460 1460 1
1461 1461
1462 1462 $ log 'merge()~0'
1463 1463 6
1464 1464 $ log 'merge()~1'
1465 1465 5
1466 1466 $ log 'merge()~2'
1467 1467 3
1468 1468 $ log 'merge()~2^1'
1469 1469 1
1470 1470 $ log 'merge()~3'
1471 1471 1
1472 1472
1473 1473 $ log '(-3:tip)^'
1474 1474 4
1475 1475 6
1476 1476 8
1477 1477
1478 1478 $ log 'tip^foo'
1479 1479 hg: parse error: ^ expects a number 0, 1, or 2
1480 1480 [255]
1481 1481
1482 1482 Bogus function gets suggestions
1483 1483 $ log 'add()'
1484 1484 hg: parse error: unknown identifier: add
1485 1485 (did you mean 'adds'?)
1486 1486 [255]
1487 1487 $ log 'added()'
1488 1488 hg: parse error: unknown identifier: added
1489 1489 (did you mean 'adds'?)
1490 1490 [255]
1491 1491 $ log 'remo()'
1492 1492 hg: parse error: unknown identifier: remo
1493 1493 (did you mean one of remote, removes?)
1494 1494 [255]
1495 1495 $ log 'babar()'
1496 1496 hg: parse error: unknown identifier: babar
1497 1497 [255]
1498 1498
1499 1499 Bogus function with a similar internal name doesn't suggest the internal name
1500 1500 $ log 'matches()'
1501 1501 hg: parse error: unknown identifier: matches
1502 1502 (did you mean 'matching'?)
1503 1503 [255]
1504 1504
1505 1505 Undocumented functions aren't suggested as similar either
1506 1506 $ log 'wdir2()'
1507 1507 hg: parse error: unknown identifier: wdir2
1508 1508 [255]
1509 1509
1510 1510 multiple revspecs
1511 1511
1512 1512 $ hg log -r 'tip~1:tip' -r 'tip~2:tip~1' --template '{rev}\n'
1513 1513 8
1514 1514 9
1515 1515 4
1516 1516 5
1517 1517 6
1518 1518 7
1519 1519
1520 1520 test usage in revpair (with "+")
1521 1521
1522 1522 (real pair)
1523 1523
1524 1524 $ hg diff -r 'tip^^' -r 'tip'
1525 1525 diff -r 2326846efdab -r 24286f4ae135 .hgtags
1526 1526 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1527 1527 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
1528 1528 @@ -0,0 +1,1 @@
1529 1529 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1530 1530 $ hg diff -r 'tip^^::tip'
1531 1531 diff -r 2326846efdab -r 24286f4ae135 .hgtags
1532 1532 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1533 1533 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
1534 1534 @@ -0,0 +1,1 @@
1535 1535 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1536 1536
1537 1537 (single rev)
1538 1538
1539 1539 $ hg diff -r 'tip^' -r 'tip^'
1540 1540 $ hg diff -r 'tip^:tip^'
1541 1541
1542 1542 (single rev that does not looks like a range)
1543 1543
1544 1544 $ hg diff -r 'tip^::tip^ or tip^'
1545 1545 diff -r d5d0dcbdc4d9 .hgtags
1546 1546 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1547 1547 +++ b/.hgtags * (glob)
1548 1548 @@ -0,0 +1,1 @@
1549 1549 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1550 1550 $ hg diff -r 'tip^ or tip^'
1551 1551 diff -r d5d0dcbdc4d9 .hgtags
1552 1552 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1553 1553 +++ b/.hgtags * (glob)
1554 1554 @@ -0,0 +1,1 @@
1555 1555 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1556 1556
1557 1557 (no rev)
1558 1558
1559 1559 $ hg diff -r 'author("babar") or author("celeste")'
1560 1560 abort: empty revision range
1561 1561 [255]
1562 1562
1563 1563 aliases:
1564 1564
1565 1565 $ echo '[revsetalias]' >> .hg/hgrc
1566 1566 $ echo 'm = merge()' >> .hg/hgrc
1567 1567 (revset aliases can override builtin revsets)
1568 1568 $ echo 'p2($1) = p1($1)' >> .hg/hgrc
1569 1569 $ echo 'sincem = descendants(m)' >> .hg/hgrc
1570 1570 $ echo 'd($1) = reverse(sort($1, date))' >> .hg/hgrc
1571 1571 $ echo 'rs(ARG1, ARG2) = reverse(sort(ARG1, ARG2))' >> .hg/hgrc
1572 1572 $ echo 'rs4(ARG1, ARGA, ARGB, ARG2) = reverse(sort(ARG1, ARG2))' >> .hg/hgrc
1573 1573
1574 1574 $ try m
1575 1575 ('symbol', 'm')
1576 1576 (func
1577 1577 ('symbol', 'merge')
1578 1578 None)
1579 1579 * set:
1580 1580 <filteredset
1581 1581 <fullreposet+ 0:9>>
1582 1582 6
1583 1583
1584 1584 $ HGPLAIN=1
1585 1585 $ export HGPLAIN
1586 1586 $ try m
1587 1587 ('symbol', 'm')
1588 1588 abort: unknown revision 'm'!
1589 1589 [255]
1590 1590
1591 1591 $ HGPLAINEXCEPT=revsetalias
1592 1592 $ export HGPLAINEXCEPT
1593 1593 $ try m
1594 1594 ('symbol', 'm')
1595 1595 (func
1596 1596 ('symbol', 'merge')
1597 1597 None)
1598 1598 * set:
1599 1599 <filteredset
1600 1600 <fullreposet+ 0:9>>
1601 1601 6
1602 1602
1603 1603 $ unset HGPLAIN
1604 1604 $ unset HGPLAINEXCEPT
1605 1605
1606 1606 $ try 'p2(.)'
1607 1607 (func
1608 1608 ('symbol', 'p2')
1609 1609 ('symbol', '.'))
1610 1610 (func
1611 1611 ('symbol', 'p1')
1612 1612 ('symbol', '.'))
1613 1613 * set:
1614 1614 <baseset+ [8]>
1615 1615 8
1616 1616
1617 1617 $ HGPLAIN=1
1618 1618 $ export HGPLAIN
1619 1619 $ try 'p2(.)'
1620 1620 (func
1621 1621 ('symbol', 'p2')
1622 1622 ('symbol', '.'))
1623 1623 * set:
1624 1624 <baseset+ []>
1625 1625
1626 1626 $ HGPLAINEXCEPT=revsetalias
1627 1627 $ export HGPLAINEXCEPT
1628 1628 $ try 'p2(.)'
1629 1629 (func
1630 1630 ('symbol', 'p2')
1631 1631 ('symbol', '.'))
1632 1632 (func
1633 1633 ('symbol', 'p1')
1634 1634 ('symbol', '.'))
1635 1635 * set:
1636 1636 <baseset+ [8]>
1637 1637 8
1638 1638
1639 1639 $ unset HGPLAIN
1640 1640 $ unset HGPLAINEXCEPT
1641 1641
1642 1642 test alias recursion
1643 1643
1644 1644 $ try sincem
1645 1645 ('symbol', 'sincem')
1646 1646 (func
1647 1647 ('symbol', 'descendants')
1648 1648 (func
1649 1649 ('symbol', 'merge')
1650 1650 None))
1651 1651 * set:
1652 1652 <addset+
1653 1653 <filteredset
1654 1654 <fullreposet+ 0:9>>,
1655 1655 <generatorset+>>
1656 1656 6
1657 1657 7
1658 1658
1659 1659 test infinite recursion
1660 1660
1661 1661 $ echo 'recurse1 = recurse2' >> .hg/hgrc
1662 1662 $ echo 'recurse2 = recurse1' >> .hg/hgrc
1663 1663 $ try recurse1
1664 1664 ('symbol', 'recurse1')
1665 1665 hg: parse error: infinite expansion of revset alias "recurse1" detected
1666 1666 [255]
1667 1667
1668 1668 $ echo 'level1($1, $2) = $1 or $2' >> .hg/hgrc
1669 1669 $ echo 'level2($1, $2) = level1($2, $1)' >> .hg/hgrc
1670 1670 $ try "level2(level1(1, 2), 3)"
1671 1671 (func
1672 1672 ('symbol', 'level2')
1673 1673 (list
1674 1674 (func
1675 1675 ('symbol', 'level1')
1676 1676 (list
1677 1677 ('symbol', '1')
1678 1678 ('symbol', '2')))
1679 1679 ('symbol', '3')))
1680 1680 (or
1681 1681 ('symbol', '3')
1682 1682 (or
1683 1683 ('symbol', '1')
1684 1684 ('symbol', '2')))
1685 1685 * set:
1686 1686 <addset
1687 1687 <baseset [3]>,
1688 1688 <baseset [1, 2]>>
1689 1689 3
1690 1690 1
1691 1691 2
1692 1692
1693 1693 test nesting and variable passing
1694 1694
1695 1695 $ echo 'nested($1) = nested2($1)' >> .hg/hgrc
1696 1696 $ echo 'nested2($1) = nested3($1)' >> .hg/hgrc
1697 1697 $ echo 'nested3($1) = max($1)' >> .hg/hgrc
1698 1698 $ try 'nested(2:5)'
1699 1699 (func
1700 1700 ('symbol', 'nested')
1701 1701 (range
1702 1702 ('symbol', '2')
1703 1703 ('symbol', '5')))
1704 1704 (func
1705 1705 ('symbol', 'max')
1706 1706 (range
1707 1707 ('symbol', '2')
1708 1708 ('symbol', '5')))
1709 1709 * set:
1710 1710 <baseset [5]>
1711 1711 5
1712 1712
1713 1713 test chained `or` operations are flattened at parsing phase
1714 1714
1715 1715 $ echo 'chainedorops($1, $2, $3) = $1|$2|$3' >> .hg/hgrc
1716 1716 $ try 'chainedorops(0:1, 1:2, 2:3)'
1717 1717 (func
1718 1718 ('symbol', 'chainedorops')
1719 1719 (list
1720 1720 (list
1721 1721 (range
1722 1722 ('symbol', '0')
1723 1723 ('symbol', '1'))
1724 1724 (range
1725 1725 ('symbol', '1')
1726 1726 ('symbol', '2')))
1727 1727 (range
1728 1728 ('symbol', '2')
1729 1729 ('symbol', '3'))))
1730 1730 (or
1731 1731 (range
1732 1732 ('symbol', '0')
1733 1733 ('symbol', '1'))
1734 1734 (range
1735 1735 ('symbol', '1')
1736 1736 ('symbol', '2'))
1737 1737 (range
1738 1738 ('symbol', '2')
1739 1739 ('symbol', '3')))
1740 1740 * set:
1741 1741 <addset
1742 1742 <spanset+ 0:1>,
1743 1743 <addset
1744 1744 <spanset+ 1:2>,
1745 1745 <spanset+ 2:3>>>
1746 1746 0
1747 1747 1
1748 1748 2
1749 1749 3
1750 1750
1751 1751 test variable isolation, variable placeholders are rewritten as string
1752 1752 then parsed and matched again as string. Check they do not leak too
1753 1753 far away.
1754 1754
1755 1755 $ echo 'injectparamasstring = max("$1")' >> .hg/hgrc
1756 1756 $ echo 'callinjection($1) = descendants(injectparamasstring)' >> .hg/hgrc
1757 1757 $ try 'callinjection(2:5)'
1758 1758 (func
1759 1759 ('symbol', 'callinjection')
1760 1760 (range
1761 1761 ('symbol', '2')
1762 1762 ('symbol', '5')))
1763 1763 (func
1764 1764 ('symbol', 'descendants')
1765 1765 (func
1766 1766 ('symbol', 'max')
1767 1767 ('string', '$1')))
1768 1768 abort: unknown revision '$1'!
1769 1769 [255]
1770 1770
1771 1771 $ echo 'injectparamasstring2 = max(_aliasarg("$1"))' >> .hg/hgrc
1772 1772 $ echo 'callinjection2($1) = descendants(injectparamasstring2)' >> .hg/hgrc
1773 1773 $ try 'callinjection2(2:5)'
1774 1774 (func
1775 1775 ('symbol', 'callinjection2')
1776 1776 (range
1777 1777 ('symbol', '2')
1778 1778 ('symbol', '5')))
1779 1779 abort: failed to parse the definition of revset alias "injectparamasstring2": unknown identifier: _aliasarg
1780 1780 [255]
1781 1781 $ hg debugrevspec --debug --config revsetalias.anotherbadone='branch(' "tip"
1782 1782 ('symbol', 'tip')
1783 1783 warning: failed to parse the definition of revset alias "anotherbadone": at 7: not a prefix: end
1784 1784 warning: failed to parse the definition of revset alias "injectparamasstring2": unknown identifier: _aliasarg
1785 1785 * set:
1786 1786 <baseset [9]>
1787 1787 9
1788 1788 >>> data = file('.hg/hgrc', 'rb').read()
1789 1789 >>> file('.hg/hgrc', 'wb').write(data.replace('_aliasarg', ''))
1790 1790
1791 1791 $ try 'tip'
1792 1792 ('symbol', 'tip')
1793 1793 * set:
1794 1794 <baseset [9]>
1795 1795 9
1796 1796
1797 1797 $ hg debugrevspec --debug --config revsetalias.'bad name'='tip' "tip"
1798 1798 ('symbol', 'tip')
1799 1799 warning: failed to parse the declaration of revset alias "bad name": at 4: invalid token
1800 1800 * set:
1801 1801 <baseset [9]>
1802 1802 9
1803 1803 $ echo 'strictreplacing($1, $10) = $10 or desc("$1")' >> .hg/hgrc
1804 1804 $ try 'strictreplacing("foo", tip)'
1805 1805 (func
1806 1806 ('symbol', 'strictreplacing')
1807 1807 (list
1808 1808 ('string', 'foo')
1809 1809 ('symbol', 'tip')))
1810 1810 (or
1811 1811 ('symbol', 'tip')
1812 1812 (func
1813 1813 ('symbol', 'desc')
1814 1814 ('string', '$1')))
1815 1815 * set:
1816 1816 <addset
1817 1817 <baseset [9]>,
1818 1818 <filteredset
1819 1819 <fullreposet+ 0:9>>>
1820 1820 9
1821 1821
1822 1822 $ try 'd(2:5)'
1823 1823 (func
1824 1824 ('symbol', 'd')
1825 1825 (range
1826 1826 ('symbol', '2')
1827 1827 ('symbol', '5')))
1828 1828 (func
1829 1829 ('symbol', 'reverse')
1830 1830 (func
1831 1831 ('symbol', 'sort')
1832 1832 (list
1833 1833 (range
1834 1834 ('symbol', '2')
1835 1835 ('symbol', '5'))
1836 1836 ('symbol', 'date'))))
1837 1837 * set:
1838 1838 <baseset [4, 5, 3, 2]>
1839 1839 4
1840 1840 5
1841 1841 3
1842 1842 2
1843 1843 $ try 'rs(2 or 3, date)'
1844 1844 (func
1845 1845 ('symbol', 'rs')
1846 1846 (list
1847 1847 (or
1848 1848 ('symbol', '2')
1849 1849 ('symbol', '3'))
1850 1850 ('symbol', 'date')))
1851 1851 (func
1852 1852 ('symbol', 'reverse')
1853 1853 (func
1854 1854 ('symbol', 'sort')
1855 1855 (list
1856 1856 (or
1857 1857 ('symbol', '2')
1858 1858 ('symbol', '3'))
1859 1859 ('symbol', 'date'))))
1860 1860 * set:
1861 1861 <baseset [3, 2]>
1862 1862 3
1863 1863 2
1864 1864 $ try 'rs()'
1865 1865 (func
1866 1866 ('symbol', 'rs')
1867 1867 None)
1868 1868 hg: parse error: invalid number of arguments: 0
1869 1869 [255]
1870 1870 $ try 'rs(2)'
1871 1871 (func
1872 1872 ('symbol', 'rs')
1873 1873 ('symbol', '2'))
1874 1874 hg: parse error: invalid number of arguments: 1
1875 1875 [255]
1876 1876 $ try 'rs(2, data, 7)'
1877 1877 (func
1878 1878 ('symbol', 'rs')
1879 1879 (list
1880 1880 (list
1881 1881 ('symbol', '2')
1882 1882 ('symbol', 'data'))
1883 1883 ('symbol', '7')))
1884 1884 hg: parse error: invalid number of arguments: 3
1885 1885 [255]
1886 1886 $ try 'rs4(2 or 3, x, x, date)'
1887 1887 (func
1888 1888 ('symbol', 'rs4')
1889 1889 (list
1890 1890 (list
1891 1891 (list
1892 1892 (or
1893 1893 ('symbol', '2')
1894 1894 ('symbol', '3'))
1895 1895 ('symbol', 'x'))
1896 1896 ('symbol', 'x'))
1897 1897 ('symbol', 'date')))
1898 1898 (func
1899 1899 ('symbol', 'reverse')
1900 1900 (func
1901 1901 ('symbol', 'sort')
1902 1902 (list
1903 1903 (or
1904 1904 ('symbol', '2')
1905 1905 ('symbol', '3'))
1906 1906 ('symbol', 'date'))))
1907 1907 * set:
1908 1908 <baseset [3, 2]>
1909 1909 3
1910 1910 2
1911 1911
1912 1912 issue4553: check that revset aliases override existing hash prefix
1913 1913
1914 1914 $ hg log -qr e
1915 1915 6:e0cc66ef77e8
1916 1916
1917 1917 $ hg log -qr e --config revsetalias.e="all()"
1918 1918 0:2785f51eece5
1919 1919 1:d75937da8da0
1920 1920 2:5ed5505e9f1c
1921 1921 3:8528aa5637f2
1922 1922 4:2326846efdab
1923 1923 5:904fa392b941
1924 1924 6:e0cc66ef77e8
1925 1925 7:013af1973af4
1926 1926 8:d5d0dcbdc4d9
1927 1927 9:24286f4ae135
1928 1928
1929 1929 $ hg log -qr e: --config revsetalias.e="0"
1930 1930 0:2785f51eece5
1931 1931 1:d75937da8da0
1932 1932 2:5ed5505e9f1c
1933 1933 3:8528aa5637f2
1934 1934 4:2326846efdab
1935 1935 5:904fa392b941
1936 1936 6:e0cc66ef77e8
1937 1937 7:013af1973af4
1938 1938 8:d5d0dcbdc4d9
1939 1939 9:24286f4ae135
1940 1940
1941 1941 $ hg log -qr :e --config revsetalias.e="9"
1942 1942 0:2785f51eece5
1943 1943 1:d75937da8da0
1944 1944 2:5ed5505e9f1c
1945 1945 3:8528aa5637f2
1946 1946 4:2326846efdab
1947 1947 5:904fa392b941
1948 1948 6:e0cc66ef77e8
1949 1949 7:013af1973af4
1950 1950 8:d5d0dcbdc4d9
1951 1951 9:24286f4ae135
1952 1952
1953 1953 $ hg log -qr e:
1954 1954 6:e0cc66ef77e8
1955 1955 7:013af1973af4
1956 1956 8:d5d0dcbdc4d9
1957 1957 9:24286f4ae135
1958 1958
1959 1959 $ hg log -qr :e
1960 1960 0:2785f51eece5
1961 1961 1:d75937da8da0
1962 1962 2:5ed5505e9f1c
1963 1963 3:8528aa5637f2
1964 1964 4:2326846efdab
1965 1965 5:904fa392b941
1966 1966 6:e0cc66ef77e8
1967 1967
1968 1968 issue2549 - correct optimizations
1969 1969
1970 1970 $ log 'limit(1 or 2 or 3, 2) and not 2'
1971 1971 1
1972 1972 $ log 'max(1 or 2) and not 2'
1973 1973 $ log 'min(1 or 2) and not 1'
1974 1974 $ log 'last(1 or 2, 1) and not 2'
1975 1975
1976 1976 issue4289 - ordering of built-ins
1977 1977 $ hg log -M -q -r 3:2
1978 1978 3:8528aa5637f2
1979 1979 2:5ed5505e9f1c
1980 1980
1981 1981 test revsets started with 40-chars hash (issue3669)
1982 1982
1983 1983 $ ISSUE3669_TIP=`hg tip --template '{node}'`
1984 1984 $ hg log -r "${ISSUE3669_TIP}" --template '{rev}\n'
1985 1985 9
1986 1986 $ hg log -r "${ISSUE3669_TIP}^" --template '{rev}\n'
1987 1987 8
1988 1988
1989 1989 test or-ed indirect predicates (issue3775)
1990 1990
1991 1991 $ log '6 or 6^1' | sort
1992 1992 5
1993 1993 6
1994 1994 $ log '6^1 or 6' | sort
1995 1995 5
1996 1996 6
1997 1997 $ log '4 or 4~1' | sort
1998 1998 2
1999 1999 4
2000 2000 $ log '4~1 or 4' | sort
2001 2001 2
2002 2002 4
2003 2003 $ log '(0 or 2):(4 or 6) or 0 or 6' | sort
2004 2004 0
2005 2005 1
2006 2006 2
2007 2007 3
2008 2008 4
2009 2009 5
2010 2010 6
2011 2011 $ log '0 or 6 or (0 or 2):(4 or 6)' | sort
2012 2012 0
2013 2013 1
2014 2014 2
2015 2015 3
2016 2016 4
2017 2017 5
2018 2018 6
2019 2019
2020 2020 tests for 'remote()' predicate:
2021 2021 #. (csets in remote) (id) (remote)
2022 2022 1. less than local current branch "default"
2023 2023 2. same with local specified "default"
2024 2024 3. more than local specified specified
2025 2025
2026 2026 $ hg clone --quiet -U . ../remote3
2027 2027 $ cd ../remote3
2028 2028 $ hg update -q 7
2029 2029 $ echo r > r
2030 2030 $ hg ci -Aqm 10
2031 2031 $ log 'remote()'
2032 2032 7
2033 2033 $ log 'remote("a-b-c-")'
2034 2034 2
2035 2035 $ cd ../repo
2036 2036 $ log 'remote(".a.b.c.", "../remote3")'
2037 2037
2038 2038 tests for concatenation of strings/symbols by "##"
2039 2039
2040 2040 $ try "278 ## '5f5' ## 1ee ## 'ce5'"
2041 2041 (_concat
2042 2042 (_concat
2043 2043 (_concat
2044 2044 ('symbol', '278')
2045 2045 ('string', '5f5'))
2046 2046 ('symbol', '1ee'))
2047 2047 ('string', 'ce5'))
2048 2048 ('string', '2785f51eece5')
2049 2049 * set:
2050 2050 <baseset [0]>
2051 2051 0
2052 2052
2053 2053 $ echo 'cat4($1, $2, $3, $4) = $1 ## $2 ## $3 ## $4' >> .hg/hgrc
2054 2054 $ try "cat4(278, '5f5', 1ee, 'ce5')"
2055 2055 (func
2056 2056 ('symbol', 'cat4')
2057 2057 (list
2058 2058 (list
2059 2059 (list
2060 2060 ('symbol', '278')
2061 2061 ('string', '5f5'))
2062 2062 ('symbol', '1ee'))
2063 2063 ('string', 'ce5')))
2064 2064 (_concat
2065 2065 (_concat
2066 2066 (_concat
2067 2067 ('symbol', '278')
2068 2068 ('string', '5f5'))
2069 2069 ('symbol', '1ee'))
2070 2070 ('string', 'ce5'))
2071 2071 ('string', '2785f51eece5')
2072 2072 * set:
2073 2073 <baseset [0]>
2074 2074 0
2075 2075
2076 2076 (check concatenation in alias nesting)
2077 2077
2078 2078 $ echo 'cat2($1, $2) = $1 ## $2' >> .hg/hgrc
2079 2079 $ echo 'cat2x2($1, $2, $3, $4) = cat2($1 ## $2, $3 ## $4)' >> .hg/hgrc
2080 2080 $ log "cat2x2(278, '5f5', 1ee, 'ce5')"
2081 2081 0
2082 2082
2083 2083 (check operator priority)
2084 2084
2085 2085 $ echo 'cat2n2($1, $2, $3, $4) = $1 ## $2 or $3 ## $4~2' >> .hg/hgrc
2086 2086 $ log "cat2n2(2785f5, 1eece5, 24286f, 4ae135)"
2087 2087 0
2088 2088 4
2089 2089
2090 2090 $ cd ..
2091 2091
2092 2092 prepare repository that has "default" branches of multiple roots
2093 2093
2094 2094 $ hg init namedbranch
2095 2095 $ cd namedbranch
2096 2096
2097 2097 $ echo default0 >> a
2098 2098 $ hg ci -Aqm0
2099 2099 $ echo default1 >> a
2100 2100 $ hg ci -m1
2101 2101
2102 2102 $ hg branch -q stable
2103 2103 $ echo stable2 >> a
2104 2104 $ hg ci -m2
2105 2105 $ echo stable3 >> a
2106 2106 $ hg ci -m3
2107 2107
2108 2108 $ hg update -q null
2109 2109 $ echo default4 >> a
2110 2110 $ hg ci -Aqm4
2111 2111 $ echo default5 >> a
2112 2112 $ hg ci -m5
2113 2113
2114 2114 "null" revision belongs to "default" branch (issue4683)
2115 2115
2116 2116 $ log 'branch(null)'
2117 2117 0
2118 2118 1
2119 2119 4
2120 2120 5
2121 2121
2122 2122 "null" revision belongs to "default" branch, but it shouldn't appear in set
2123 2123 unless explicitly specified (issue4682)
2124 2124
2125 2125 $ log 'children(branch(default))'
2126 2126 1
2127 2127 2
2128 2128 5
2129 2129
2130 2130 $ cd ..
2131 2131
2132 2132 test author/desc/keyword in problematic encoding
2133 2133 # unicode: cp932:
2134 2134 # u30A2 0x83 0x41(= 'A')
2135 2135 # u30C2 0x83 0x61(= 'a')
2136 2136
2137 2137 $ hg init problematicencoding
2138 2138 $ cd problematicencoding
2139 2139
2140 2140 $ python > setup.sh <<EOF
2141 2141 > print u'''
2142 2142 > echo a > text
2143 2143 > hg add text
2144 2144 > hg --encoding utf-8 commit -u '\u30A2' -m none
2145 2145 > echo b > text
2146 2146 > hg --encoding utf-8 commit -u '\u30C2' -m none
2147 2147 > echo c > text
2148 2148 > hg --encoding utf-8 commit -u none -m '\u30A2'
2149 2149 > echo d > text
2150 2150 > hg --encoding utf-8 commit -u none -m '\u30C2'
2151 2151 > '''.encode('utf-8')
2152 2152 > EOF
2153 2153 $ sh < setup.sh
2154 2154
2155 2155 test in problematic encoding
2156 2156 $ python > test.sh <<EOF
2157 2157 > print u'''
2158 2158 > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30A2)'
2159 2159 > echo ====
2160 2160 > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30C2)'
2161 2161 > echo ====
2162 2162 > hg --encoding cp932 log --template '{rev}\\n' -r 'desc(\u30A2)'
2163 2163 > echo ====
2164 2164 > hg --encoding cp932 log --template '{rev}\\n' -r 'desc(\u30C2)'
2165 2165 > echo ====
2166 2166 > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30A2)'
2167 2167 > echo ====
2168 2168 > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30C2)'
2169 2169 > '''.encode('cp932')
2170 2170 > EOF
2171 2171 $ sh < test.sh
2172 2172 0
2173 2173 ====
2174 2174 1
2175 2175 ====
2176 2176 2
2177 2177 ====
2178 2178 3
2179 2179 ====
2180 2180 0
2181 2181 2
2182 2182 ====
2183 2183 1
2184 2184 3
2185 2185
2186 2186 test error message of bad revset
2187 2187 $ hg log -r 'foo\\'
2188 2188 hg: parse error at 3: syntax error in revset 'foo\\'
2189 2189 [255]
2190 2190
2191 2191 $ cd ..
2192
2193 Test registrar.delayregistrar via revset.extpredicate
2194
2195 'extpredicate' decorator shouldn't register any functions until
2196 'setup()' on it.
2197
2198 $ cd repo
2199
2200 $ cat <<EOF > $TESTTMP/custompredicate.py
2201 > from mercurial import revset
2202 >
2203 > revsetpredicate = revset.extpredicate()
2204 >
2205 > @revsetpredicate('custom1()')
2206 > def custom1(repo, subset, x):
2207 > return revset.baseset([1])
2208 > @revsetpredicate('custom2()')
2209 > def custom2(repo, subset, x):
2210 > return revset.baseset([2])
2211 >
2212 > def uisetup(ui):
2213 > if ui.configbool('custompredicate', 'enabled'):
2214 > revsetpredicate.setup()
2215 > EOF
2216 $ cat <<EOF > .hg/hgrc
2217 > [extensions]
2218 > custompredicate = $TESTTMP/custompredicate.py
2219 > EOF
2220
2221 $ hg debugrevspec "custom1()"
2222 hg: parse error: unknown identifier: custom1
2223 [255]
2224 $ hg debugrevspec "custom2()"
2225 hg: parse error: unknown identifier: custom2
2226 [255]
2227 $ hg debugrevspec "custom1() or custom2()" --config custompredicate.enabled=true
2228 1
2229 2
2230
2231 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now