##// END OF EJS Templates
revert: evaluate filesets against working directory (issue4497)...
Martin von Zweigbergk -
r24438:5b85a5bc default
parent child Browse files
Show More
@@ -1,1406 +1,1406 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18
19 19 import lfutil
20 20 import lfcommands
21 21 import basestore
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def composelargefilematcher(match, manifest):
26 26 '''create a matcher that matches only the largefiles in the original
27 27 matcher'''
28 28 m = copy.copy(match)
29 29 lfile = lambda f: lfutil.standin(f) in manifest
30 30 m._files = filter(lfile, m._files)
31 31 m._fmap = set(m._files)
32 32 m._always = False
33 33 origmatchfn = m.matchfn
34 34 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
35 35 return m
36 36
37 37 def composenormalfilematcher(match, manifest, exclude=None):
38 38 excluded = set()
39 39 if exclude is not None:
40 40 excluded.update(exclude)
41 41
42 42 m = copy.copy(match)
43 43 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
44 44 manifest or f in excluded)
45 45 m._files = filter(notlfile, m._files)
46 46 m._fmap = set(m._files)
47 47 m._always = False
48 48 origmatchfn = m.matchfn
49 49 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
50 50 return m
51 51
52 52 def installnormalfilesmatchfn(manifest):
53 53 '''installmatchfn with a matchfn that ignores all largefiles'''
54 54 def overridematch(ctx, pats=[], opts={}, globbed=False,
55 55 default='relpath'):
56 56 match = oldmatch(ctx, pats, opts, globbed, default)
57 57 return composenormalfilematcher(match, manifest)
58 58 oldmatch = installmatchfn(overridematch)
59 59
60 60 def installmatchfn(f):
61 61 '''monkey patch the scmutil module with a custom match function.
62 62 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
63 63 oldmatch = scmutil.match
64 64 setattr(f, 'oldmatch', oldmatch)
65 65 scmutil.match = f
66 66 return oldmatch
67 67
68 68 def restorematchfn():
69 69 '''restores scmutil.match to what it was before installmatchfn
70 70 was called. no-op if scmutil.match is its original function.
71 71
72 72 Note that n calls to installmatchfn will require n calls to
73 73 restore the original matchfn.'''
74 74 scmutil.match = getattr(scmutil.match, 'oldmatch')
75 75
76 76 def installmatchandpatsfn(f):
77 77 oldmatchandpats = scmutil.matchandpats
78 78 setattr(f, 'oldmatchandpats', oldmatchandpats)
79 79 scmutil.matchandpats = f
80 80 return oldmatchandpats
81 81
82 82 def restorematchandpatsfn():
83 83 '''restores scmutil.matchandpats to what it was before
84 84 installmatchandpatsfn was called. No-op if scmutil.matchandpats
85 85 is its original function.
86 86
87 87 Note that n calls to installmatchandpatsfn will require n calls
88 88 to restore the original matchfn.'''
89 89 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
90 90 scmutil.matchandpats)
91 91
92 92 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
93 93 large = opts.get('large')
94 94 lfsize = lfutil.getminsize(
95 95 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
96 96
97 97 lfmatcher = None
98 98 if lfutil.islfilesrepo(repo):
99 99 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
100 100 if lfpats:
101 101 lfmatcher = match_.match(repo.root, '', list(lfpats))
102 102
103 103 lfnames = []
104 104 m = copy.copy(matcher)
105 105 m.bad = lambda x, y: None
106 106 wctx = repo[None]
107 107 for f in repo.walk(m):
108 108 exact = m.exact(f)
109 109 lfile = lfutil.standin(f) in wctx
110 110 nfile = f in wctx
111 111 exists = lfile or nfile
112 112
113 113 # addremove in core gets fancy with the name, add doesn't
114 114 if isaddremove:
115 115 name = m.uipath(f)
116 116 else:
117 117 name = m.rel(f)
118 118
119 119 # Don't warn the user when they attempt to add a normal tracked file.
120 120 # The normal add code will do that for us.
121 121 if exact and exists:
122 122 if lfile:
123 123 ui.warn(_('%s already a largefile\n') % name)
124 124 continue
125 125
126 126 if (exact or not exists) and not lfutil.isstandin(f):
127 127 # In case the file was removed previously, but not committed
128 128 # (issue3507)
129 129 if not repo.wvfs.exists(f):
130 130 continue
131 131
132 132 abovemin = (lfsize and
133 133 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
134 134 if large or abovemin or (lfmatcher and lfmatcher(f)):
135 135 lfnames.append(f)
136 136 if ui.verbose or not exact:
137 137 ui.status(_('adding %s as a largefile\n') % name)
138 138
139 139 bad = []
140 140
141 141 # Need to lock, otherwise there could be a race condition between
142 142 # when standins are created and added to the repo.
143 143 wlock = repo.wlock()
144 144 try:
145 145 if not opts.get('dry_run'):
146 146 standins = []
147 147 lfdirstate = lfutil.openlfdirstate(ui, repo)
148 148 for f in lfnames:
149 149 standinname = lfutil.standin(f)
150 150 lfutil.writestandin(repo, standinname, hash='',
151 151 executable=lfutil.getexecutable(repo.wjoin(f)))
152 152 standins.append(standinname)
153 153 if lfdirstate[f] == 'r':
154 154 lfdirstate.normallookup(f)
155 155 else:
156 156 lfdirstate.add(f)
157 157 lfdirstate.write()
158 158 bad += [lfutil.splitstandin(f)
159 159 for f in repo[None].add(standins)
160 160 if f in m.files()]
161 161
162 162 added = [f for f in lfnames if f not in bad]
163 163 finally:
164 164 wlock.release()
165 165 return added, bad
166 166
167 167 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
168 168 after = opts.get('after')
169 169 m = composelargefilematcher(matcher, repo[None].manifest())
170 170 try:
171 171 repo.lfstatus = True
172 172 s = repo.status(match=m, clean=not isaddremove)
173 173 finally:
174 174 repo.lfstatus = False
175 175 manifest = repo[None].manifest()
176 176 modified, added, deleted, clean = [[f for f in list
177 177 if lfutil.standin(f) in manifest]
178 178 for list in (s.modified, s.added,
179 179 s.deleted, s.clean)]
180 180
181 181 def warn(files, msg):
182 182 for f in files:
183 183 ui.warn(msg % m.rel(f))
184 184 return int(len(files) > 0)
185 185
186 186 result = 0
187 187
188 188 if after:
189 189 remove = deleted
190 190 result = warn(modified + added + clean,
191 191 _('not removing %s: file still exists\n'))
192 192 else:
193 193 remove = deleted + clean
194 194 result = warn(modified, _('not removing %s: file is modified (use -f'
195 195 ' to force removal)\n'))
196 196 result = warn(added, _('not removing %s: file has been marked for add'
197 197 ' (use forget to undo)\n')) or result
198 198
199 199 # Need to lock because standin files are deleted then removed from the
200 200 # repository and we could race in-between.
201 201 wlock = repo.wlock()
202 202 try:
203 203 lfdirstate = lfutil.openlfdirstate(ui, repo)
204 204 for f in sorted(remove):
205 205 if ui.verbose or not m.exact(f):
206 206 # addremove in core gets fancy with the name, remove doesn't
207 207 if isaddremove:
208 208 name = m.uipath(f)
209 209 else:
210 210 name = m.rel(f)
211 211 ui.status(_('removing %s\n') % name)
212 212
213 213 if not opts.get('dry_run'):
214 214 if not after:
215 215 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
216 216
217 217 if opts.get('dry_run'):
218 218 return result
219 219
220 220 remove = [lfutil.standin(f) for f in remove]
221 221 # If this is being called by addremove, let the original addremove
222 222 # function handle this.
223 223 if not isaddremove:
224 224 for f in remove:
225 225 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
226 226 repo[None].forget(remove)
227 227
228 228 for f in remove:
229 229 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
230 230 False)
231 231
232 232 lfdirstate.write()
233 233 finally:
234 234 wlock.release()
235 235
236 236 return result
237 237
238 238 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 239 # appear at their right place in the manifests.
240 240 def decodepath(orig, path):
241 241 return lfutil.splitstandin(path) or path
242 242
243 243 # -- Wrappers: modify existing commands --------------------------------
244 244
245 245 def overrideadd(orig, ui, repo, *pats, **opts):
246 246 if opts.get('normal') and opts.get('large'):
247 247 raise util.Abort(_('--normal cannot be used with --large'))
248 248 return orig(ui, repo, *pats, **opts)
249 249
250 250 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
251 251 # The --normal flag short circuits this override
252 252 if opts.get('normal'):
253 253 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
254 254
255 255 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
256 256 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
257 257 ladded)
258 258 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
259 259
260 260 bad.extend(f for f in lbad)
261 261 return bad
262 262
263 263 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
264 264 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
265 265 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
266 266 return removelargefiles(ui, repo, False, matcher, after=after,
267 267 force=force) or result
268 268
269 269 def overridestatusfn(orig, repo, rev2, **opts):
270 270 try:
271 271 repo._repo.lfstatus = True
272 272 return orig(repo, rev2, **opts)
273 273 finally:
274 274 repo._repo.lfstatus = False
275 275
276 276 def overridestatus(orig, ui, repo, *pats, **opts):
277 277 try:
278 278 repo.lfstatus = True
279 279 return orig(ui, repo, *pats, **opts)
280 280 finally:
281 281 repo.lfstatus = False
282 282
283 283 def overridedirty(orig, repo, ignoreupdate=False):
284 284 try:
285 285 repo._repo.lfstatus = True
286 286 return orig(repo, ignoreupdate)
287 287 finally:
288 288 repo._repo.lfstatus = False
289 289
290 290 def overridelog(orig, ui, repo, *pats, **opts):
291 291 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
292 292 default='relpath'):
293 293 """Matcher that merges root directory with .hglf, suitable for log.
294 294 It is still possible to match .hglf directly.
295 295 For any listed files run log on the standin too.
296 296 matchfn tries both the given filename and with .hglf stripped.
297 297 """
298 298 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
299 299 m, p = copy.copy(matchandpats)
300 300
301 301 if m.always():
302 302 # We want to match everything anyway, so there's no benefit trying
303 303 # to add standins.
304 304 return matchandpats
305 305
306 306 pats = set(p)
307 307
308 308 def fixpats(pat, tostandin=lfutil.standin):
309 309 kindpat = match_._patsplit(pat, None)
310 310
311 311 if kindpat[0] is not None:
312 312 return kindpat[0] + ':' + tostandin(kindpat[1])
313 313 return tostandin(kindpat[1])
314 314
315 315 if m._cwd:
316 316 hglf = lfutil.shortname
317 317 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
318 318
319 319 def tostandin(f):
320 320 # The file may already be a standin, so trucate the back
321 321 # prefix and test before mangling it. This avoids turning
322 322 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
323 323 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
324 324 return f
325 325
326 326 # An absolute path is from outside the repo, so truncate the
327 327 # path to the root before building the standin. Otherwise cwd
328 328 # is somewhere in the repo, relative to root, and needs to be
329 329 # prepended before building the standin.
330 330 if os.path.isabs(m._cwd):
331 331 f = f[len(back):]
332 332 else:
333 333 f = m._cwd + '/' + f
334 334 return back + lfutil.standin(f)
335 335
336 336 pats.update(fixpats(f, tostandin) for f in p)
337 337 else:
338 338 def tostandin(f):
339 339 if lfutil.splitstandin(f):
340 340 return f
341 341 return lfutil.standin(f)
342 342 pats.update(fixpats(f, tostandin) for f in p)
343 343
344 344 for i in range(0, len(m._files)):
345 345 # Don't add '.hglf' to m.files, since that is already covered by '.'
346 346 if m._files[i] == '.':
347 347 continue
348 348 standin = lfutil.standin(m._files[i])
349 349 # If the "standin" is a directory, append instead of replace to
350 350 # support naming a directory on the command line with only
351 351 # largefiles. The original directory is kept to support normal
352 352 # files.
353 353 if standin in repo[ctx.node()]:
354 354 m._files[i] = standin
355 355 elif m._files[i] not in repo[ctx.node()] \
356 356 and repo.wvfs.isdir(standin):
357 357 m._files.append(standin)
358 358
359 359 m._fmap = set(m._files)
360 360 m._always = False
361 361 origmatchfn = m.matchfn
362 362 def lfmatchfn(f):
363 363 lf = lfutil.splitstandin(f)
364 364 if lf is not None and origmatchfn(lf):
365 365 return True
366 366 r = origmatchfn(f)
367 367 return r
368 368 m.matchfn = lfmatchfn
369 369
370 370 ui.debug('updated patterns: %s\n' % sorted(pats))
371 371 return m, pats
372 372
373 373 # For hg log --patch, the match object is used in two different senses:
374 374 # (1) to determine what revisions should be printed out, and
375 375 # (2) to determine what files to print out diffs for.
376 376 # The magic matchandpats override should be used for case (1) but not for
377 377 # case (2).
378 378 def overridemakelogfilematcher(repo, pats, opts):
379 379 pctx = repo[None]
380 380 match, pats = oldmatchandpats(pctx, pats, opts)
381 381 return lambda rev: match
382 382
383 383 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
384 384 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
385 385 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
386 386
387 387 try:
388 388 return orig(ui, repo, *pats, **opts)
389 389 finally:
390 390 restorematchandpatsfn()
391 391 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
392 392
393 393 def overrideverify(orig, ui, repo, *pats, **opts):
394 394 large = opts.pop('large', False)
395 395 all = opts.pop('lfa', False)
396 396 contents = opts.pop('lfc', False)
397 397
398 398 result = orig(ui, repo, *pats, **opts)
399 399 if large or all or contents:
400 400 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
401 401 return result
402 402
403 403 def overridedebugstate(orig, ui, repo, *pats, **opts):
404 404 large = opts.pop('large', False)
405 405 if large:
406 406 class fakerepo(object):
407 407 dirstate = lfutil.openlfdirstate(ui, repo)
408 408 orig(ui, fakerepo, *pats, **opts)
409 409 else:
410 410 orig(ui, repo, *pats, **opts)
411 411
412 412 # Override needs to refresh standins so that update's normal merge
413 413 # will go through properly. Then the other update hook (overriding repo.update)
414 414 # will get the new files. Filemerge is also overridden so that the merge
415 415 # will merge standins correctly.
416 416 def overrideupdate(orig, ui, repo, *pats, **opts):
417 417 # Need to lock between the standins getting updated and their
418 418 # largefiles getting updated
419 419 wlock = repo.wlock()
420 420 try:
421 421 if opts['check']:
422 422 lfdirstate = lfutil.openlfdirstate(ui, repo)
423 423 unsure, s = lfdirstate.status(
424 424 match_.always(repo.root, repo.getcwd()),
425 425 [], False, False, False)
426 426
427 427 mod = len(s.modified) > 0
428 428 for lfile in unsure:
429 429 standin = lfutil.standin(lfile)
430 430 if repo['.'][standin].data().strip() != \
431 431 lfutil.hashfile(repo.wjoin(lfile)):
432 432 mod = True
433 433 else:
434 434 lfdirstate.normal(lfile)
435 435 lfdirstate.write()
436 436 if mod:
437 437 raise util.Abort(_('uncommitted changes'))
438 438 return orig(ui, repo, *pats, **opts)
439 439 finally:
440 440 wlock.release()
441 441
442 442 # Before starting the manifest merge, merge.updates will call
443 443 # _checkunknownfile to check if there are any files in the merged-in
444 444 # changeset that collide with unknown files in the working copy.
445 445 #
446 446 # The largefiles are seen as unknown, so this prevents us from merging
447 447 # in a file 'foo' if we already have a largefile with the same name.
448 448 #
449 449 # The overridden function filters the unknown files by removing any
450 450 # largefiles. This makes the merge proceed and we can then handle this
451 451 # case further in the overridden calculateupdates function below.
452 452 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
453 453 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
454 454 return False
455 455 return origfn(repo, wctx, mctx, f, f2)
456 456
457 457 # The manifest merge handles conflicts on the manifest level. We want
458 458 # to handle changes in largefile-ness of files at this level too.
459 459 #
460 460 # The strategy is to run the original calculateupdates and then process
461 461 # the action list it outputs. There are two cases we need to deal with:
462 462 #
463 463 # 1. Normal file in p1, largefile in p2. Here the largefile is
464 464 # detected via its standin file, which will enter the working copy
465 465 # with a "get" action. It is not "merge" since the standin is all
466 466 # Mercurial is concerned with at this level -- the link to the
467 467 # existing normal file is not relevant here.
468 468 #
469 469 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
470 470 # since the largefile will be present in the working copy and
471 471 # different from the normal file in p2. Mercurial therefore
472 472 # triggers a merge action.
473 473 #
474 474 # In both cases, we prompt the user and emit new actions to either
475 475 # remove the standin (if the normal file was kept) or to remove the
476 476 # normal file and get the standin (if the largefile was kept). The
477 477 # default prompt answer is to use the largefile version since it was
478 478 # presumably changed on purpose.
479 479 #
480 480 # Finally, the merge.applyupdates function will then take care of
481 481 # writing the files into the working copy and lfcommands.updatelfiles
482 482 # will update the largefiles.
483 483 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
484 484 partial, acceptremote, followcopies):
485 485 overwrite = force and not branchmerge
486 486 actions, diverge, renamedelete = origfn(
487 487 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
488 488 followcopies)
489 489
490 490 if overwrite:
491 491 return actions, diverge, renamedelete
492 492
493 493 # Convert to dictionary with filename as key and action as value.
494 494 lfiles = set()
495 495 for f in actions:
496 496 splitstandin = f and lfutil.splitstandin(f)
497 497 if splitstandin in p1:
498 498 lfiles.add(splitstandin)
499 499 elif lfutil.standin(f) in p1:
500 500 lfiles.add(f)
501 501
502 502 for lfile in lfiles:
503 503 standin = lfutil.standin(lfile)
504 504 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
505 505 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
506 506 if sm in ('g', 'dc') and lm != 'r':
507 507 # Case 1: normal file in the working copy, largefile in
508 508 # the second parent
509 509 usermsg = _('remote turned local normal file %s into a largefile\n'
510 510 'use (l)argefile or keep (n)ormal file?'
511 511 '$$ &Largefile $$ &Normal file') % lfile
512 512 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
513 513 actions[lfile] = ('r', None, 'replaced by standin')
514 514 actions[standin] = ('g', sargs, 'replaces standin')
515 515 else: # keep local normal file
516 516 actions[lfile] = ('k', None, 'replaces standin')
517 517 if branchmerge:
518 518 actions[standin] = ('k', None, 'replaced by non-standin')
519 519 else:
520 520 actions[standin] = ('r', None, 'replaced by non-standin')
521 521 elif lm in ('g', 'dc') and sm != 'r':
522 522 # Case 2: largefile in the working copy, normal file in
523 523 # the second parent
524 524 usermsg = _('remote turned local largefile %s into a normal file\n'
525 525 'keep (l)argefile or use (n)ormal file?'
526 526 '$$ &Largefile $$ &Normal file') % lfile
527 527 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
528 528 if branchmerge:
529 529 # largefile can be restored from standin safely
530 530 actions[lfile] = ('k', None, 'replaced by standin')
531 531 actions[standin] = ('k', None, 'replaces standin')
532 532 else:
533 533 # "lfile" should be marked as "removed" without
534 534 # removal of itself
535 535 actions[lfile] = ('lfmr', None,
536 536 'forget non-standin largefile')
537 537
538 538 # linear-merge should treat this largefile as 're-added'
539 539 actions[standin] = ('a', None, 'keep standin')
540 540 else: # pick remote normal file
541 541 actions[lfile] = ('g', largs, 'replaces standin')
542 542 actions[standin] = ('r', None, 'replaced by non-standin')
543 543
544 544 return actions, diverge, renamedelete
545 545
546 546 def mergerecordupdates(orig, repo, actions, branchmerge):
547 547 if 'lfmr' in actions:
548 548 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
549 549 for lfile, args, msg in actions['lfmr']:
550 550 # this should be executed before 'orig', to execute 'remove'
551 551 # before all other actions
552 552 repo.dirstate.remove(lfile)
553 553 # make sure lfile doesn't get synclfdirstate'd as normal
554 554 lfdirstate.add(lfile)
555 555 lfdirstate.write()
556 556
557 557 return orig(repo, actions, branchmerge)
558 558
559 559
560 560 # Override filemerge to prompt the user about how they wish to merge
561 561 # largefiles. This will handle identical edits without prompting the user.
562 562 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
563 563 if not lfutil.isstandin(orig):
564 564 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
565 565
566 566 ahash = fca.data().strip().lower()
567 567 dhash = fcd.data().strip().lower()
568 568 ohash = fco.data().strip().lower()
569 569 if (ohash != ahash and
570 570 ohash != dhash and
571 571 (dhash == ahash or
572 572 repo.ui.promptchoice(
573 573 _('largefile %s has a merge conflict\nancestor was %s\n'
574 574 'keep (l)ocal %s or\ntake (o)ther %s?'
575 575 '$$ &Local $$ &Other') %
576 576 (lfutil.splitstandin(orig), ahash, dhash, ohash),
577 577 0) == 1)):
578 578 repo.wwrite(fcd.path(), fco.data(), fco.flags())
579 579 return 0
580 580
581 581 def copiespathcopies(orig, ctx1, ctx2):
582 582 copies = orig(ctx1, ctx2)
583 583 updated = {}
584 584
585 585 for k, v in copies.iteritems():
586 586 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
587 587
588 588 return updated
589 589
590 590 # Copy first changes the matchers to match standins instead of
591 591 # largefiles. Then it overrides util.copyfile in that function it
592 592 # checks if the destination largefile already exists. It also keeps a
593 593 # list of copied files so that the largefiles can be copied and the
594 594 # dirstate updated.
595 595 def overridecopy(orig, ui, repo, pats, opts, rename=False):
596 596 # doesn't remove largefile on rename
597 597 if len(pats) < 2:
598 598 # this isn't legal, let the original function deal with it
599 599 return orig(ui, repo, pats, opts, rename)
600 600
601 601 # This could copy both lfiles and normal files in one command,
602 602 # but we don't want to do that. First replace their matcher to
603 603 # only match normal files and run it, then replace it to just
604 604 # match largefiles and run it again.
605 605 nonormalfiles = False
606 606 nolfiles = False
607 607 installnormalfilesmatchfn(repo[None].manifest())
608 608 try:
609 609 try:
610 610 result = orig(ui, repo, pats, opts, rename)
611 611 except util.Abort, e:
612 612 if str(e) != _('no files to copy'):
613 613 raise e
614 614 else:
615 615 nonormalfiles = True
616 616 result = 0
617 617 finally:
618 618 restorematchfn()
619 619
620 620 # The first rename can cause our current working directory to be removed.
621 621 # In that case there is nothing left to copy/rename so just quit.
622 622 try:
623 623 repo.getcwd()
624 624 except OSError:
625 625 return result
626 626
627 627 def makestandin(relpath):
628 628 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
629 629 return os.path.join(repo.wjoin(lfutil.standin(path)))
630 630
631 631 fullpats = scmutil.expandpats(pats)
632 632 dest = fullpats[-1]
633 633
634 634 if os.path.isdir(dest):
635 635 if not os.path.isdir(makestandin(dest)):
636 636 os.makedirs(makestandin(dest))
637 637
638 638 try:
639 639 try:
640 640 # When we call orig below it creates the standins but we don't add
641 641 # them to the dir state until later so lock during that time.
642 642 wlock = repo.wlock()
643 643
644 644 manifest = repo[None].manifest()
645 645 def overridematch(ctx, pats=[], opts={}, globbed=False,
646 646 default='relpath'):
647 647 newpats = []
648 648 # The patterns were previously mangled to add the standin
649 649 # directory; we need to remove that now
650 650 for pat in pats:
651 651 if match_.patkind(pat) is None and lfutil.shortname in pat:
652 652 newpats.append(pat.replace(lfutil.shortname, ''))
653 653 else:
654 654 newpats.append(pat)
655 655 match = oldmatch(ctx, newpats, opts, globbed, default)
656 656 m = copy.copy(match)
657 657 lfile = lambda f: lfutil.standin(f) in manifest
658 658 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
659 659 m._fmap = set(m._files)
660 660 origmatchfn = m.matchfn
661 661 m.matchfn = lambda f: (lfutil.isstandin(f) and
662 662 (f in manifest) and
663 663 origmatchfn(lfutil.splitstandin(f)) or
664 664 None)
665 665 return m
666 666 oldmatch = installmatchfn(overridematch)
667 667 listpats = []
668 668 for pat in pats:
669 669 if match_.patkind(pat) is not None:
670 670 listpats.append(pat)
671 671 else:
672 672 listpats.append(makestandin(pat))
673 673
674 674 try:
675 675 origcopyfile = util.copyfile
676 676 copiedfiles = []
677 677 def overridecopyfile(src, dest):
678 678 if (lfutil.shortname in src and
679 679 dest.startswith(repo.wjoin(lfutil.shortname))):
680 680 destlfile = dest.replace(lfutil.shortname, '')
681 681 if not opts['force'] and os.path.exists(destlfile):
682 682 raise IOError('',
683 683 _('destination largefile already exists'))
684 684 copiedfiles.append((src, dest))
685 685 origcopyfile(src, dest)
686 686
687 687 util.copyfile = overridecopyfile
688 688 result += orig(ui, repo, listpats, opts, rename)
689 689 finally:
690 690 util.copyfile = origcopyfile
691 691
692 692 lfdirstate = lfutil.openlfdirstate(ui, repo)
693 693 for (src, dest) in copiedfiles:
694 694 if (lfutil.shortname in src and
695 695 dest.startswith(repo.wjoin(lfutil.shortname))):
696 696 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
697 697 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
698 698 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
699 699 if not os.path.isdir(destlfiledir):
700 700 os.makedirs(destlfiledir)
701 701 if rename:
702 702 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
703 703
704 704 # The file is gone, but this deletes any empty parent
705 705 # directories as a side-effect.
706 706 util.unlinkpath(repo.wjoin(srclfile), True)
707 707 lfdirstate.remove(srclfile)
708 708 else:
709 709 util.copyfile(repo.wjoin(srclfile),
710 710 repo.wjoin(destlfile))
711 711
712 712 lfdirstate.add(destlfile)
713 713 lfdirstate.write()
714 714 except util.Abort, e:
715 715 if str(e) != _('no files to copy'):
716 716 raise e
717 717 else:
718 718 nolfiles = True
719 719 finally:
720 720 restorematchfn()
721 721 wlock.release()
722 722
723 723 if nolfiles and nonormalfiles:
724 724 raise util.Abort(_('no files to copy'))
725 725
726 726 return result
727 727
728 728 # When the user calls revert, we have to be careful to not revert any
729 729 # changes to other largefiles accidentally. This means we have to keep
730 730 # track of the largefiles that are being reverted so we only pull down
731 731 # the necessary largefiles.
732 732 #
733 733 # Standins are only updated (to match the hash of largefiles) before
734 734 # commits. Update the standins then run the original revert, changing
735 735 # the matcher to hit standins instead of largefiles. Based on the
736 736 # resulting standins update the largefiles.
737 737 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
738 738 # Because we put the standins in a bad state (by updating them)
739 739 # and then return them to a correct state we need to lock to
740 740 # prevent others from changing them in their incorrect state.
741 741 wlock = repo.wlock()
742 742 try:
743 743 lfdirstate = lfutil.openlfdirstate(ui, repo)
744 744 s = lfutil.lfdirstatestatus(lfdirstate, repo)
745 745 lfdirstate.write()
746 746 for lfile in s.modified:
747 747 lfutil.updatestandin(repo, lfutil.standin(lfile))
748 748 for lfile in s.deleted:
749 749 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
750 750 os.unlink(repo.wjoin(lfutil.standin(lfile)))
751 751
752 752 oldstandins = lfutil.getstandinsstate(repo)
753 753
754 754 def overridematch(mctx, pats=[], opts={}, globbed=False,
755 755 default='relpath'):
756 756 match = oldmatch(mctx, pats, opts, globbed, default)
757 757 m = copy.copy(match)
758 758
759 759 # revert supports recursing into subrepos, and though largefiles
760 760 # currently doesn't work correctly in that case, this match is
761 761 # called, so the lfdirstate above may not be the correct one for
762 762 # this invocation of match.
763 763 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
764 764 False)
765 765
766 766 def tostandin(f):
767 767 standin = lfutil.standin(f)
768 if standin in mctx:
768 if standin in ctx or standin in mctx:
769 769 return standin
770 770 elif standin in repo[None] or lfdirstate[f] == 'r':
771 771 return None
772 772 return f
773 773 m._files = [tostandin(f) for f in m._files]
774 774 m._files = [f for f in m._files if f is not None]
775 775 m._fmap = set(m._files)
776 776 origmatchfn = m.matchfn
777 777 def matchfn(f):
778 778 if lfutil.isstandin(f):
779 779 return (origmatchfn(lfutil.splitstandin(f)) and
780 (f in repo[None] or f in mctx))
780 (f in ctx or f in mctx))
781 781 return origmatchfn(f)
782 782 m.matchfn = matchfn
783 783 return m
784 784 oldmatch = installmatchfn(overridematch)
785 785 try:
786 786 orig(ui, repo, ctx, parents, *pats, **opts)
787 787 finally:
788 788 restorematchfn()
789 789
790 790 newstandins = lfutil.getstandinsstate(repo)
791 791 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
792 792 # lfdirstate should be 'normallookup'-ed for updated files,
793 793 # because reverting doesn't touch dirstate for 'normal' files
794 794 # when target revision is explicitly specified: in such case,
795 795 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
796 796 # of target (standin) file.
797 797 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
798 798 normallookup=True)
799 799
800 800 finally:
801 801 wlock.release()
802 802
803 803 # after pulling changesets, we need to take some extra care to get
804 804 # largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap 'hg pull' so largefiles referenced by the new changesets can
    also be fetched (--all-largefiles / --lfrev)."""
    prepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    postpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and postpull > prepull:
        numcached = 0
        # the pulled() revset symbol reads repo.firstpulled
        repo.firstpulled = prepull
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # never leak the marker attribute past this command
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
827 827
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

        Some examples:

        - pull largefiles for all new changesets::

            hg pull -lfrev "pulled()"

        - pull largefiles for all new branch heads::

            hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set while overridepull is running; outside
    # that window this revset symbol is meaningless.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise util.Abort(_("pulled() only available in --lfrev"))
    # everything at or after the pre-pull tip is "just pulled"
    return revset.baseset([rev for rev in subset if rev >= firstpulled])
853 853
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject --all-largefiles clones to non-local destinations early."""
    destpath = dest
    if destpath is None:
        destpath = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
864 864
def hgclone(orig, ui, opts, *args, **kwargs):
    """Post-process a clone: persist the largefiles requirement in the new
    repo's hgrc and optionally pre-download all largefiles."""
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # If largefiles is required for this repo, permanently enable it
        # locally
        if 'largefiles' in repo.requirements:
            fp = repo.vfs('hgrc', 'a', text=True)
            try:
                fp.write('\n[extensions]\nlargefiles=\n')
            finally:
                fp.close()

        # Caching is implicitly limited to 'rev' option, since the dest repo
        # was truncated at that point. The user may expect a download count
        # with this option, so attempt whether or not this is a largefile
        # repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            # signal failure to the caller if any largefile was unavailable
            if missing != 0:
                return None

    return result
890 890
def overriderebase(orig, ui, repo, **opts):
    """Run rebase with automated largefiles commit hooks and silenced
    largefiles status output."""
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        # pop in reverse order of the appends above
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
903 903
904 904 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
905 905 prefix='', mtime=None, subrepos=None):
906 906 # No need to lock because we are only reading history and
907 907 # largefile caches, neither of which are modified.
908 908 lfcommands.cachelfiles(repo.ui, repo, node)
909 909
910 910 if kind not in archival.archivers:
911 911 raise util.Abort(_("unknown archive type '%s'") % kind)
912 912
913 913 ctx = repo[node]
914 914
915 915 if kind == 'files':
916 916 if prefix:
917 917 raise util.Abort(
918 918 _('cannot give prefix when archiving to files'))
919 919 else:
920 920 prefix = archival.tidyprefix(dest, kind, prefix)
921 921
922 922 def write(name, mode, islink, getdata):
923 923 if matchfn and not matchfn(name):
924 924 return
925 925 data = getdata()
926 926 if decode:
927 927 data = repo.wwritedata(name, data)
928 928 archiver.addfile(prefix + name, mode, islink, data)
929 929
930 930 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
931 931
932 932 if repo.ui.configbool("ui", "archivemeta", True):
933 933 def metadata():
934 934 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
935 935 hex(repo.changelog.node(0)), hex(node), ctx.branch())
936 936
937 937 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
938 938 if repo.tagtype(t) == 'global')
939 939 if not tags:
940 940 repo.ui.pushbuffer()
941 941 opts = {'template': '{latesttag}\n{latesttagdistance}',
942 942 'style': '', 'patch': None, 'git': None}
943 943 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
944 944 ltags, dist = repo.ui.popbuffer().split('\n')
945 945 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
946 946 tags += 'latesttagdistance: %s\n' % dist
947 947
948 948 return base + tags
949 949
950 950 write('.hg_archival.txt', 0644, False, metadata)
951 951
952 952 for f in ctx:
953 953 ff = ctx.flags(f)
954 954 getdata = ctx[f].data
955 955 if lfutil.isstandin(f):
956 956 path = lfutil.findfile(repo, getdata().strip())
957 957 if path is None:
958 958 raise util.Abort(
959 959 _('largefile %s not found in repo store or system cache')
960 960 % lfutil.splitstandin(f))
961 961 f = lfutil.splitstandin(f)
962 962
963 963 def getdatafn():
964 964 fd = None
965 965 try:
966 966 fd = open(path, 'rb')
967 967 return fd.read()
968 968 finally:
969 969 if fd:
970 970 fd.close()
971 971
972 972 getdata = getdatafn
973 973 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
974 974
975 975 if subrepos:
976 976 for subpath in sorted(ctx.substate):
977 977 sub = ctx.sub(subpath)
978 978 submatch = match_.narrowmatcher(subpath, matchfn)
979 979 sub.archive(archiver, prefix, submatch)
980 980
981 981 archiver.done()
982 982
983 983 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
984 984 repo._get(repo._state + ('hg',))
985 985 rev = repo._state[1]
986 986 ctx = repo._repo[rev]
987 987
988 988 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
989 989
990 990 def write(name, mode, islink, getdata):
991 991 # At this point, the standin has been replaced with the largefile name,
992 992 # so the normal matcher works here without the lfutil variants.
993 993 if match and not match(f):
994 994 return
995 995 data = getdata()
996 996
997 997 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
998 998
999 999 for f in ctx:
1000 1000 ff = ctx.flags(f)
1001 1001 getdata = ctx[f].data
1002 1002 if lfutil.isstandin(f):
1003 1003 path = lfutil.findfile(repo._repo, getdata().strip())
1004 1004 if path is None:
1005 1005 raise util.Abort(
1006 1006 _('largefile %s not found in repo store or system cache')
1007 1007 % lfutil.splitstandin(f))
1008 1008 f = lfutil.splitstandin(f)
1009 1009
1010 1010 def getdatafn():
1011 1011 fd = None
1012 1012 try:
1013 1013 fd = open(os.path.join(prefix, path), 'rb')
1014 1014 return fd.read()
1015 1015 finally:
1016 1016 if fd:
1017 1017 fd.close()
1018 1018
1019 1019 getdata = getdatafn
1020 1020
1021 1021 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
1022 1022
1023 1023 for subpath in sorted(ctx.substate):
1024 1024 sub = ctx.sub(subpath)
1025 1025 submatch = match_.narrowmatcher(subpath, match)
1026 1026 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
1027 1027
1028 1028 # If a largefile is modified, the change is not reflected in its
1029 1029 # standin until a commit. cmdutil.bailifchanged() raises an exception
1030 1030 # if the repo has uncommitted changes. Wrap it to also check if
1031 1031 # largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo):
    """Abort like cmdutil.bailifchanged, but also treat modified largefiles
    as uncommitted changes (the standin lags until commit)."""
    orig(repo)
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise util.Abort(_('uncommitted changes'))
1039 1039
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    """Forget normal files via orig(), then forget matching largefiles by
    updating the largefiles dirstate and dropping their standins."""
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # only largefiles whose standin is actually tracked can be forgotten
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1085 1085
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        # record each (filename, hash) pair at most once
        key = (fn, lfhash)
        if key not in knowns:
            knowns.add(key)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        # one batched existence query against the remote store
        lfexists = basestore._openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1108 1108
def outgoinghook(ui, repo, other, opts, missing):
    """'hg outgoing --large' hook: list largefiles that would be uploaded.

    In debug mode every hash is shown per file; otherwise only filenames."""
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}
            def addfunc(fn, lfhash):
                # map filename -> list of hashes so debug output can show all
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1140 1140
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary --large --remote' hook: report how many largefile
    entities/files would be uploaded."""
    largeopt = opts.get('large', False)
    if changes is None:
        # first invocation: tell summary which remote checks we need
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1169 1169
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefiles-aware status enabled, restoring the
    flag even when orig() raises."""
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1176 1176
def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
                     similarity=None):
    """addremove wrapper: remove missing largefiles and add new ones before
    delegating the remaining (normal) files to orig()."""
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted
        # list because that affects the m.exact() test, which in turn governs
        # whether or not the file name is printed, and how. Simply limit the
        # original matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything
    # with largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1209 1209
1210 1210 # Calling purge with --all will cause the largefiles to be deleted.
1211 1211 # Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Run purge with a status() that hides largefiles known to the
    largefiles dirstate, so 'purge --all' does not delete them."""
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only lookup attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow the
    # monkey patched one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # keep only files the largefiles dirstate does not know about
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Roll back the last transaction and bring standins and the largefiles
    dirstate back in sync with the restored dirstate parents."""
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore standin content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            # standins tracked before rollback but unknown afterwards
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1275 1275
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with automated largefiles commit hooks and silenced
    largefiles status output."""
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        # undo the two appends above, newest first
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result
1286 1286
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """'hg cat' override that outputs largefile contents (fetched from the
    store if needed) when a largefile or its standin is named."""
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # also match a file whose standin exists for the requested name
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # suppress "no such file" for names we resolved via standins
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
1334 1334
def mergeupdate(orig, repo, node, branchmerge, force, partial,
                *args, **kwargs):
    """merge.update wrapper: refresh standins from modified largefiles
    before merging, and update largefiles in the working copy afterwards."""
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        linearmerge = not branchmerge and not force and not partial

        if linearmerge or (branchmerge and force and not partial):
            # update standins for linear-merge or force-branch-merge,
            # because largefiles in the working directory may be modified
            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
            unsure, s = lfdirstate.status(match_.always(repo.root,
                                                        repo.getcwd()),
                                          [], False, False, False)
            pctx = repo['.']
            for lfile in unsure + s.modified:
                lfileabs = repo.wvfs.join(lfile)
                if not os.path.exists(lfileabs):
                    continue
                lfhash = lfutil.hashrepofile(repo, lfile)
                standin = lfutil.standin(lfile)
                lfutil.writestandin(repo, standin, lfhash,
                                    lfutil.getexecutable(lfileabs))
                if (standin in pctx and
                    lfhash == lfutil.readstandin(repo, lfile, '.')):
                    lfdirstate.normal(lfile)
            for lfile in s.added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
            lfdirstate.write()

        if linearmerge:
            # Only call updatelfiles on the standins that have changed
            # to save time
            oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)

        filelist = None
        if linearmerge:
            newstandins = lfutil.getstandinsstate(repo)
            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial, checked=linearmerge)

        return result
    finally:
        wlock.release()
1397 1397
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marking files touched, refresh the working-copy largefiles
    corresponding to any touched standins."""
    result = orig(repo, files, *args, **kwargs)

    touched = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
    if touched:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touched,
                                printmessage=False, normallookup=True)

    return result
@@ -1,3256 +1,3255 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import encoding
17 17 import crecord as crecordmod
18 18 import lock as lockmod
19 19
def parsealiases(cmd):
    """Return the list of alias names encoded in a command-table key.

    A key looks like "^log|history": the caret marks a command shown in
    short help and '|' separates alternative spellings.
    """
    stripped = cmd.lstrip("^")
    return stripped.split("|")
22 22
def setupwrapcolorwrite(ui):
    """Replace ui.write with a diff-labeling wrapper; return the original
    write method so the caller can restore it."""
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        for chunk, chunklabel in patch.difflabel(lambda: args):
            orig(chunk, label=label + chunklabel)

    oldwrite = ui.write
    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrap)
    return oldwrite
35 35
def filterchunks(ui, originalhunks, usecurses, testfile):
    """Let the user pick hunks, via the curses UI when enabled."""
    if not usecurses:
        # plain text-mode hunk selection
        return patch.filterpatch(ui, originalhunks)

    if testfile:
        # test mode drives the curses selector from a script file
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, recordfn)
48 48
def recordfilter(ui, originalhunks):
    """Interactively filter hunks, honoring the experimental crecord
    configuration; ui.write is wrapped for colorizing and always restored."""
    usecurses = ui.configbool('experimental', 'crecord', False)
    testfile = ui.config('experimental', 'crecordtest', None)
    oldwrite = setupwrapcolorwrite(ui)
    try:
        selected = filterchunks(ui, originalhunks, usecurses, testfile)
    finally:
        ui.write = oldwrite
    return selected
58 58
59 59 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
60 60 filterfn, *pats, **opts):
61 61 import merge as mergemod
62 62 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
63 63 ishunk = lambda x: isinstance(x, hunkclasses)
64 64
65 65 if not ui.interactive():
66 66 raise util.Abort(_('running non-interactively, use %s instead') %
67 67 cmdsuggest)
68 68
69 69 # make sure username is set before going interactive
70 70 if not opts.get('user'):
71 71 ui.username() # raise exception, username not provided
72 72
73 73 def recordfunc(ui, repo, message, match, opts):
74 74 """This is generic record driver.
75 75
76 76 Its job is to interactively filter local changes, and
77 77 accordingly prepare working directory into a state in which the
78 78 job can be delegated to a non-interactive commit command such as
79 79 'commit' or 'qrefresh'.
80 80
81 81 After the actual job is done by non-interactive command, the
82 82 working directory is restored to its original state.
83 83
84 84 In the end we'll record interesting changes, and everything else
85 85 will be left in place, so the user can continue working.
86 86 """
87 87
88 88 checkunfinished(repo, commit=True)
89 89 merge = len(repo[None].parents()) > 1
90 90 if merge:
91 91 raise util.Abort(_('cannot partially commit a merge '
92 92 '(use "hg commit" instead)'))
93 93
94 94 status = repo.status(match=match)
95 95 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
96 96 diffopts.nodates = True
97 97 diffopts.git = True
98 98 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
99 99 originalchunks = patch.parsepatch(originaldiff)
100 100
101 101 # 1. filter patch, so we have intending-to apply subset of it
102 102 try:
103 103 chunks = filterfn(ui, originalchunks)
104 104 except patch.PatchError, err:
105 105 raise util.Abort(_('error parsing patch: %s') % err)
106 106
107 107 contenders = set()
108 108 for h in chunks:
109 109 try:
110 110 contenders.update(set(h.files()))
111 111 except AttributeError:
112 112 pass
113 113
114 114 changed = status.modified + status.added + status.removed
115 115 newfiles = [f for f in changed if f in contenders]
116 116 if not newfiles:
117 117 ui.status(_('no changes to record\n'))
118 118 return 0
119 119
120 120 newandmodifiedfiles = set()
121 121 for h in chunks:
122 122 isnew = h.filename() in status.added
123 123 if ishunk(h) and isnew and not h in originalchunks:
124 124 newandmodifiedfiles.add(h.filename())
125 125
126 126 modified = set(status.modified)
127 127
128 128 # 2. backup changed files, so we can restore them in the end
129 129
130 130 if backupall:
131 131 tobackup = changed
132 132 else:
133 133 tobackup = [f for f in newfiles
134 134 if f in modified or f in newandmodifiedfiles]
135 135
136 136 backups = {}
137 137 if tobackup:
138 138 backupdir = repo.join('record-backups')
139 139 try:
140 140 os.mkdir(backupdir)
141 141 except OSError, err:
142 142 if err.errno != errno.EEXIST:
143 143 raise
144 144 try:
145 145 # backup continues
146 146 for f in tobackup:
147 147 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
148 148 dir=backupdir)
149 149 os.close(fd)
150 150 ui.debug('backup %r as %r\n' % (f, tmpname))
151 151 util.copyfile(repo.wjoin(f), tmpname)
152 152 shutil.copystat(repo.wjoin(f), tmpname)
153 153 backups[f] = tmpname
154 154
155 155 fp = cStringIO.StringIO()
156 156 for c in chunks:
157 157 fname = c.filename()
158 158 if fname in backups or fname in newandmodifiedfiles:
159 159 c.write(fp)
160 160 dopatch = fp.tell()
161 161 fp.seek(0)
162 162
163 163 [os.unlink(c) for c in newandmodifiedfiles]
164 164
165 165 # 3a. apply filtered patch to clean repo (clean)
166 166 if backups:
167 167 # Equivalent to hg.revert
168 168 choices = lambda key: key in backups
169 169 mergemod.update(repo, repo.dirstate.p1(),
170 170 False, True, choices)
171 171
172 172
173 173 # 3b. (apply)
174 174 if dopatch:
175 175 try:
176 176 ui.debug('applying patch\n')
177 177 ui.debug(fp.getvalue())
178 178 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
179 179 except patch.PatchError, err:
180 180 raise util.Abort(str(err))
181 181 del fp
182 182
183 183 # 4. We prepared working directory according to filtered
184 184 # patch. Now is the time to delegate the job to
185 185 # commit/qrefresh or the like!
186 186
187 187 # Make all of the pathnames absolute.
188 188 newfiles = [repo.wjoin(nf) for nf in newfiles]
189 189 commitfunc(ui, repo, *newfiles, **opts)
190 190
191 191 return 0
192 192 finally:
193 193 # 5. finally restore backed-up files
194 194 try:
195 195 for realname, tmpname in backups.iteritems():
196 196 ui.debug('restoring %r to %r\n' % (tmpname, realname))
197 197 util.copyfile(tmpname, repo.wjoin(realname))
198 198 # Our calls to copystat() here and above are a
199 199 # hack to trick any editors that have f open that
200 200 # we haven't modified them.
201 201 #
202 202 # Also note that this racy as an editor could
203 203 # notice the file's mtime before we've finished
204 204 # writing it.
205 205 shutil.copystat(tmpname, repo.wjoin(realname))
206 206 os.unlink(tmpname)
207 207 if tobackup:
208 208 os.rmdir(backupdir)
209 209 except OSError:
210 210 pass
211 211
212 212 return commit(ui, repo, recordfunc, pats, opts)
213 213
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command
    matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for tablekey in keys:
        aliases = parsealiases(tablekey)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            # exact alias match
            found = cmd
        elif not strict:
            # otherwise accept an unambiguous prefix
            for alias in aliases:
                if alias.startswith(cmd):
                    found = alias
                    break
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[tablekey])
            else:
                choice[found] = (aliases, table[tablekey])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
251 251
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    # more than one prefix match is ambiguous
    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd, allcmds)
268 268
def findrepo(p):
    """Walk upward from p and return the first directory containing a
    '.hg' subdirectory, or None if the filesystem root is reached."""
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # reached the root without finding a repository
            return None
        p = parent

    return p
276 276
def bailifchanged(repo):
    """Abort if the working directory has an uncommitted merge, uncommitted
    file changes, or any dirty subrepo."""
    if repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise util.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for subpath in sorted(ctx.substate):
        if ctx.sub(subpath).dirty():
            raise util.Abort(_("uncommitted changes in subrepo %s") % subpath)
287 287
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                # '-' means read the message from stdin
                message = ui.fin.read()
            else:
                # splitlines+join normalizes line endings to '\n'
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    # may be None when neither -m nor -l was given
    return message
306 306
def mergeeditform(ctxorbool, baseformname):
    """Pick the editform name (referencing a committemplate) for a commit.

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether a merge is being committed.  The result is 'baseformname'
    with '.merge' appended for a merge, '.normal' appended otherwise.
    """
    if isinstance(ctxorbool, bool):
        merging = ctxorbool
    else:
        # a changectx is a merge when it has two parents
        merging = len(ctxorbool.parents()) > 1
    if merging:
        return baseformname + ".merge"
    return baseformname + ".normal"
323 323
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """Pick the commit message editor implied by the '--edit' option.

    'finishdesc' is called with the edited commit message (the new
    changeset's 'description') right after editing, before the
    empty-ness check; whatever it returns is what gets stored.  This
    allows changing the description before storing.

    'extramsg' replaces the 'Leave message empty to abort commit' line
    shown in the editor ('HG: ' prefix and EOL are added automatically).

    'editform' is a dot-separated list of names, to distinguish the
    purpose of commit text editing.

    'finishdesc' and 'extramsg' are specific to MQ usage, so supplying
    either one forces 'commitforceeditor' regardless of 'edit'.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
354 354
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    # absent/empty limit means "no limit"
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
368 368
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the format string 'pat' into an output file name.

    Supported '%' specifiers (each only available when the argument it
    depends on is given):
      %%  literal '%'
      %b  basename of the exporting repository
      %H  changeset hash (40 hexadecimal digits)
      %h  short-form changeset hash (12 hexadecimal digits)
      %R  changeset revision number
      %r  zero-padded changeset revision number (width 'revwidth')
      %m  first line of 'desc', non-word characters mapped to '_'
      %N  number of patches being generated ('total')
      %n  zero-padded sequence number ('seqno', padded to width of %N)
      %s/%d/%p  basename/dirname/full path of 'pathname'

    Raises util.Abort on an unknown specifier.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', str(desc))
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        # note: the original code tested 'if node:' twice in a row;
        # the two branches are merged here
        if node:
            expander.update(node_expander)
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total count
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                # consume the specifier character following '%'
                i += 1
                c = expander[pat[i]]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
414 414
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return an open file-like object for the output pattern 'pat'.

    An empty pattern or '-' means the ui's stdout (when writing) or
    stdin (when reading).  A pattern that is already a file-like object
    is returned as-is.  Otherwise 'pat' is expanded via makefilename()
    and opened with 'mode'.  'modemap', if given, maps expanded file
    names to open modes and is updated so that a name first opened 'wb'
    is reopened 'ab' on subsequent calls (append instead of truncate).
    """

    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            # dup the fd so that closing the returned object does not
            # close the ui's stream
            return os.fdopen(os.dup(fp.fileno()), mode)
        else:
            # if this fp can't be duped properly, return
            # a dummy object that can be closed
            class wrappedfileobj(object):
                noop = lambda x: None
                def __init__(self, f):
                    self.f = f
                def __getattr__(self, attr):
                    # swallow close(); delegate everything else
                    if attr == 'close':
                        return self.noop
                    else:
                        return getattr(self.f, attr)

            return wrappedfileobj(fp)
    # a pattern that is already an open file object is used directly
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
452 452
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']

    # validate the option combination up front
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif (cl or mf) and file_:
        msg = _('cannot specify filename with --changelog or --manifest')
    elif (cl or mf) and not repo:
        msg = _('cannot specify --changelog or --manifest '
                'without a repository')
    if msg:
        raise util.Abort(msg)

    if repo:
        if cl:
            return repo.unfiltered().changelog
        if mf:
            return repo.manifest
        if file_:
            fl = repo.file(file_)
            # only use the filelog if it has at least one revision
            if len(fl):
                return fl

    # no in-repo revlog found; fall back to opening the file directly
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise util.Abort(_("revlog '%s' not found") % file_)
    return revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                         file_[:-2] + ".i")
487 487
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) the files matching 'pats'.

    The last element of 'pats' is the destination; the rest are
    sources.  Returns True if any individual copy failed (so the caller
    can exit non-zero).
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand 'pat' into the list of source files as (abs, rel, exact)
        # tuples, warning about (and skipping) unknown or removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform a single copy/move; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # source and target differ only in case (case-only rename)
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records the copy; the target must already exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            # 'srcexists' is only read when the copy branch above ran
            # (i.e. when 'not after'), so it is always bound here
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        # like targetpathfn, but for --after the source may no longer be
        # on disk, so the strip length is inferred from what exists at
        # the destination instead
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist at dest with
                    # this strip length
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
716 716
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With --daemon, the parent re-runs itself detached (passing
    --daemon-pipefds) and waits for the child to signal readiness by
    removing a lock file; the child then detaches from the terminal and
    redirects stdout/stderr to 'logfile' (or the null device).
    '''

    def writepid(pid):
        # record the service pid, appending when 'appendpid' is set
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # '--cwd VALUE' form: drop both the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # true once the child has removed the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(os.getpid())

    if opts['daemon_pipefds']:
        # we are the detached child: detach from the controlling
        # terminal and tell the parent we are ready
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdin to the null device and stdout/stderr to the
        # log file (or the null device when no log file was given)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
796 796
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag)
    tuple.
    """
    tmpname, message, user, date, branch, nodeid, p1, p2 = \
        patch.extract(ui, hunk)

    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # the hunk contained no actual patch
        return (None, None, False)
    msg = _('applied to working directory')

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            # --exact requires the patch to carry full parent metadata
            if not nodeid or not p1:
                raise util.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply the patch to the working directory and commit
            repo.dirstate.beginparentchange()
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError, e:
                # with --partial a failed hunk only sets the rejects flag
                if not partial:
                    raise util.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=editor, force=partial)
                repo.dirstate.endparentchange()
        else:
            # --bypass: build the commit in memory without touching the
            # working directory
            if opts.get('exact') or opts.get('import_branch'):
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError, e:
                    raise util.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            opts.get('user') or user,
                                            opts.get('date') or date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and opts.get('no_commit'):
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise util.Abort(_('patch is damaged or loses information'))
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
946 946
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    Each revision is written either to 'fp' (when given) or to a file
    named by expanding 'template' via makefileobj().  'filemode'
    remembers which names were already opened so that several patches
    landing in the same file append rather than truncate.
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}

    def single(rev, seqno, fp):
        # write the patch for one revision
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            # diff against the second parent instead of the first
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)


        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        # seqno is 1-based for %n expansion
        single(rev, seqno + 1, fp)
1008 1008
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   relative='', listsubrepos=False):
    '''show diff or diffstat.

    Writes to 'fp' when given, otherwise to the ui.  'stat' selects
    diffstat output instead of a full diff.  'relative' restricts and
    rebases the diff onto that sub-path; 'listsubrepos' recurses into
    subrepositories as well.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if relative:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), relative)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        # warn about match patterns that fall outside the relative root
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat does not need any context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1066 1066
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        # when 'buffered' is true, per-rev output is accumulated in
        # self.hunk/self.header and emitted later by flush()
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        """Write any buffered header and hunk for 'rev'.

        Returns 1 if a hunk was written, 0 otherwise.  A header is only
        written when it differs from the last one emitted.
        """
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the footer, if any subclass/caller set one
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Show 'ctx', buffering the output per-rev when requested."""
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: just "rev:shortnode"
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
                          label='log.phase')
        for parent in parents:
            label = 'log.parent changeset.%s' % self.repo[parent[0]].phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n") % parent,
                          label=label)

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # debug mode shows files split into modified/added/removed
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        """Write the diff and/or diffstat for 'node' per self.diffopts."""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
1251 1251
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # per-instance template cache (kept for interface parity with the
        # other printers; the JSON formatter does not consult it)
        self.cache = {}
        # True until the first changeset is written; decides between
        # opening the JSON array and emitting a separating comma
        self._first = True

    def close(self):
        # finish the JSON document: close the open array, or emit an
        # empty one if _show() was never called
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        hexnode = hex(ctx.node())
        rev = ctx.rev()
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            # quiet mode: only revision number and node hash
            self.ui.write('\n "rev": %d' % rev)
            self.ui.write(',\n "node": "%s"' % hexnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n "rev": %d' % rev)
        self.ui.write(',\n "node": "%s"' % hexnode)
        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))

            self.ui.write(',\n "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status against the first parent: (modified, added, removed)
            files = ctx.p1().status(ctx)
            self.ui.write(',\n "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            # capture diffstat/diff output in a buffer so it can be
            # embedded as an escaped JSON string
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1341 1341
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # in debug mode show full hashes, otherwise the short 12-char form
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        if tmpl:
            # an explicit template overrides the mapfile's changeset entry
            self.t.cache['changeset'] = tmpl

        self.cache = {}

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            # sole parent is the preceding revision: implicit, omit it
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()),
                        ('node', p.hex()),
                        ('phase', p.phasestr())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode
        # later (more specific) entries win over earlier ones
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                # footer is rendered once; close() emits it at the end
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                             **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1444 1444
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a (tmpl, mapfile) pair: a literal template string with
    mapfile None, a style map file path with tmpl None, or (None, None)
    when neither is configured.  Raises util.Abort for tmpl == 'list'
    after printing the available styles.
    """

    # ui settings
    if not tmpl and not style: # template are stronger than style
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.parsestring(tmpl)
            except SyntaxError:
                tmpl = templater.parsestring(tmpl, quoted=False)
            return tmpl, None
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            # bare style name: resolve it against the template path
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname
        return None, mapfile

    if not tmpl:
        return None, None

    # looks like a literal template?
    if '{' in tmpl:
        return tmpl, None

    # perhaps a stock style?
    if not os.path.split(tmpl)[0]:
        mapname = (templater.templatepath('map-cmdline.' + tmpl)
                   or templater.templatepath(tmpl))
        if mapname and os.path.isfile(mapname):
            return None, mapname

    # perhaps it's a reference to [templates]
    t = ui.config('templates', tmpl)
    if t:
        try:
            tmpl = templater.parsestring(t)
        except SyntaxError:
            tmpl = templater.parsestring(t, quoted=False)
        return tmpl, None

    if tmpl == 'list':
        ui.write(_("available styles: %s\n") % templater.stylelist())
        raise util.Abort(_("specify a template"))

    # perhaps it's a path to a map or a template
    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
        # is it a mapfile for a style?
        if os.path.basename(tmpl).startswith("map-"):
            return None, os.path.realpath(tmpl)
        # read the template text; close the handle explicitly instead of
        # leaking it until garbage collection
        fp = open(tmpl)
        try:
            tmpl = fp.read()
        finally:
            fp.close()
        return tmpl, None

    # constant string?
    return tmpl, None
1508 1508
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    matchfn = None
    if opts.get('patch') or opts.get('stat'):
        # diff/diffstat output needs a matcher covering all files
        matchfn = scmutil.matchall(repo)

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    try:
        t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    return t
1539 1539
def showmarker(ui, marker):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    # precursor node first, then each successor separated by spaces
    ui.write(hex(marker.precnode()))
    for successor in marker.succnodes():
        ui.write(' ')
        ui.write(hex(successor))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # metadata, minus the date (already shown), in sorted key order
    meta = sorted(marker.metadata().items())
    ui.write('{%s}' % ', '.join('%r: %r' % item for item in meta
                                if item[0] != 'date'))
    ui.write('\n')
1557 1557
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    m = scmutil.matchall(repo)
    matched = {}

    def prep(ctx, fns):
        # record every changeset whose timestamp satisfies the spec
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
1578 1578
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double up to sizelimit, then repeat.

    Produces windowsize, 2*windowsize, ... forever; once the value is
    no longer below sizelimit it stops growing and repeats.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size = size * 2
1584 1584
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone."""
    pass
1587 1587
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for each matched file, then
        # for any copy sources discovered during the walk below
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)


        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1685 1685
class _followfilter(object):
    """Stateful predicate selecting revisions related to a start revision.

    The first revision passed to match() becomes the start revision.
    Later calls report whether a revision is a descendant (for revs
    above the start) or an ancestor (for revs below it), tracked
    through the 'roots' frontier set.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # nullrev until the first match() call fixes the start revision
        self.startrev = nullrev
        # frontier linking candidate revisions back to startrev
        self.roots = set()
        # restrict the walk to first parents (--follow-first)
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parents of rev, optionally limited to the first one,
            # with nullrev entries filtered out
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            # first call: this revision anchors the walk
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1723 1723
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # first query for this rev: compute and memoize
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): subtracting a plain list only works when
                # 'wanted' is a revset smartset here, not a builtin set
                # or lazywantedset -- confirm which types reach this path
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                try:
                    rev = it.next()
                    if want(rev):
                        nrevs.append(rev)
                except (StopIteration):
                    stopiteration = True
                    break
            # prepare runs in forward order over the window, then the
            # contexts are yielded in the original (window) order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1862 1862
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # one-element list used as a mutable flag the closure can rebind
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # map each ancestor filectx's linkrev to the path it had there,
        # starting from the working directory parent's version
        for fn in files:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1889 1889
1890 1890 def _makenofollowlogfilematcher(repo, pats, opts):
1891 1891 '''hook for extensions to override the filematcher for non-follow cases'''
1892 1892 return None
1893 1893
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # option name -> (revset template, join operator for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behaviour depends on revs...
    it = iter(revs)
    startrev = it.next()
    try:
        # ascending revs means following descendants from startrev
        followdescendants = startrev < it.next()
    except (StopIteration):
        followdescendants = False

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    pctx = repo[None]
    match, pats = scmutil.matchandpats(pctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in pctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # assemble the final revset expression from the accumulated options
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2047 2047
def _logrevs(repo, opts):
    """Resolve the set of revisions a log-like command should visit."""
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    following = opts.get('follow') or opts.get('follow_first')
    userrevs = opts.get('rev')
    if userrevs:
        return scmutil.revrange(repo, userrevs)
    if following:
        if repo.dirstate.p1() == nullid:
            # unborn working directory parent: nothing to follow
            return revset.baseset()
        return repo.revs('reverse(:.)')
    revs = revset.spanset(repo)
    revs.reverse()
    return revs
2062 2062
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) for the graph log command.

    revs is an iterable of revision numbers; expr is a revset string
    built from the log options and file patterns (or None) that was used
    to filter revs; filematcher is None unless --stat or --patch was
    given, in which case it maps a revision number to a match object
    selecting the files to detail for that revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often run faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions ("A or B" typically yields
        # the revision matching A then the one matching B), so sort again.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # keep only the first 'limit' revisions
        truncated = []
        for pos, rev in enumerate(revs):
            if pos == limit:
                break
            truncated.append(rev)
        revs = revset.baseset(truncated)

    return revs, expr, filematcher
2099 2099
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) for the log command.

    revs is an iterable of revision numbers; expr is a revset string
    built from the log options and file patterns (or None) that was used
    to filter revs; filematcher is None unless --stat or --patch was
    given, in which case it maps a revision number to a match object
    selecting the files to detail for that revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    userordered = bool(opts.get('rev'))
    if expr:
        # Revset matchers often run faster on revisions in changelog
        # order, because most filters deal with the changelog; but keep
        # the order untouched when the user specified one explicitly.
        if not userordered:
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions ("A or B" typically yields
        # the revision matching A then the one matching B), so sort again.
        revs = matcher(repo, revs)
        if not userordered:
            revs.sort(reverse=True)
    if limit is not None:
        # take at most 'limit' revisions off the front of the iterator
        picked = []
        it = iter(revs)
        for dummy in xrange(limit):
            try:
                picked.append(it.next())
            except StopIteration:
                break
        revs = revset.baseset(picked)

    return revs, expr, filematcher
2138 2138
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the revisions yielded by 'dag' as an ASCII graph.

    'showparents' is a collection of nodes to mark with '@' (working
    directory parents).  'edgefn' converts each node's text lines into
    graph edge drawing data.  'getrenamed', if given, is used to collect
    copy information per file; 'filematcher', if given, maps a revision
    to a match object restricting which files the displayer details.
    """
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # pick the graph glyph: '@' for a working dir parent, 'x' for an
        # obsolete changeset, '_' for a branch-closing one, 'o' otherwise
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        # ctx.rev() is also skipped when 0: the root has no renames anyway
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # the displayer is buffered; pull the rendered text back out of
        # its hunk buffer so it can be reflowed alongside the graph
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2169 2169
def graphlog(ui, repo, *pats, **opts):
    """Run log with an ASCII revision graph.

    Parameters are identical to those of the log command.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # bound rename lookups by the highest requested revision, if any
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    wparents = [pctx.node() for pctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, wparents,
                 graphmod.asciiedges, getrenamed, filematcher)
2185 2185
def checkunsupportedgraphflags(pats, opts):
    """Abort when an option incompatible with -G/--graph was given."""
    for badopt in ("newest_first",):
        if opts.get(badopt):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % badopt.replace("_", "-"))
2191 2191
def graphrevs(repo, nodes, opts):
    """Reverse 'nodes' in place, apply --limit, and wrap them for graph
    display."""
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is not None:
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
2198 2198
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by 'match' for addition to the repository.

    When 'explicitonly' is true, only exactly-named files are added.
    'prefix' is the path of this repo relative to the outermost repo,
    used to recurse into subrepos.  Returns the list of files that were
    explicitly requested but could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # intercept match.bad so unmatched explicit names are collected in 'bad'
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    # optionally audit for case-collision problems on case-insensitive
    # filesystems, per ui configuration
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in wctx.walk(match):
        exact = match.exact(f)
        # implicit matches are only added when untracked and still on disk
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    # recurse into subrepos; implicit-only unless --subrepos was given
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # only surface rejections for files the user named explicitly
        bad.extend(f for f in rejected if f in match.files())
    return bad
2235 2235
def forget(ui, repo, match, prefix, explicitonly):
    """Untrack files matched by 'match' without deleting them.

    Returns (bad, forgot): 'bad' lists explicitly named files that could
    not be forgotten, 'forgot' the files that actually were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # intercept match.bad so unmatched explicit names are collected in 'bad'
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    # candidates are modified, added, deleted and clean files (status
    # tuple indices 0, 1, 3 and 6)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    # recurse into subrepos, translating their results back to our prefix
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly named files that are already untracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    # only surface rejections for files the user named explicitly
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2277 2277
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files of 'ctx' matched by 'm' through formatter 'fm'.

    'fmt' is the format string for the path field.  Returns 0 if at
    least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working copy (rev is None), skip files marked as removed
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2306 2306
def remove(ui, repo, m, prefix, after, force, subrepos):
    """Schedule matched files for removal from the repository.

    'after' records deletions already made in the working directory;
    'force' removes even modified or added files.  Returns 0 on success,
    1 when any file produced a warning.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    for subpath in sorted(wctx.substate):
        def matchessubrepo(matcher, subpath):
            # a subrepo is affected when named exactly or via a file in it
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = scmutil.dirs(deleted)
    for f in m.files():
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        isdir = f in deleteddirs or f in wctx.dirs()
        # skip names that are tracked, directories, '.', or inside subrepos
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    # select the files to forget, warning about the rest
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret
2391 2391
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write out the contents of the files in 'ctx' matched by 'matcher'.

    Output destinations honour the --output option via makefileobj.
    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            # apply the decode filters configured for working copy output
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx._changeset[0]
        if mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    bad = matcher.bad

    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        bad(path, msg)

    # temporarily swap in our subrepo-aware bad callback for the walk
    matcher.bad = badfn

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    matcher.bad = bad

    # recurse into subrepos
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2444 2444
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    msg = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # addremove is extracted carefully -- this function can be called
    # from a command that doesn't support addremove at all
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise util.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, msg, matcher, opts)
2461 2461
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Fold the working directory changes into changeset 'old' and return
    the node of the replacement changeset.

    Returns old.node() unchanged when the amend would be a no-op.  The
    amended changeset (and any temporary intermediate commit) is either
    obsoleted (when createmarkers is enabled) or stripped.
    """
    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            currentbookmark = repo._bookmarkcurrent
            try:
                # deactivate the current bookmark so the temporary commit
                # does not move it
                repo._bookmarkcurrent = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarkcurrent = currentbookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            # |           from working dir to go into amending commit
            # |           (or a workingctx if there were no changes)
            # |
            # old      o - changeset to amend
            # |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were
            # no changes the parent of the working directory as the version
            # of the files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # The original test here was 'if old.p2:', which checks a
                # bound method and is therefore always true, needlessly
                # computing pathcopies against the null changeset for
                # non-merges.  Call p2(); a changectx for the null revision
                # is falsy (same idiom as ctx.p2() in buildcommittext).
                if old.p2():
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    # serve file data from the intermediate commit
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                # restore the original new-commit phase configuration
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            #commit the whole amend process
            createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
            if createmarkers and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        if newid is None:
            # commitctx never ran; drop any dirstate changes made above
            repo.dirstate.invalidate()
        lockmod.release(lock, wlock)
    return newid
2656 2656
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's commit message, invoking the editor when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform)
2661 2661
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    """Open the commit editor for ctx and return the resulting message.

    Aborts when the final message is empty.  'finishdesc', if given,
    post-processes the edited text.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Look up the most specific committemplate entry: for editform
    # 'commit.amend' try 'changeset.commit.amend', then 'changeset.commit',
    # then plain 'changeset'.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    committext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    if committext is None:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
    # strip the HG: helper lines the editor text was seeded with
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
2691 2691
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit editor text for 'ctx' from template 'tmpl'."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])

    # expose the other committemplate.* config keys to the template engine
    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    # the templater writes to ui; capture its output in a buffer
    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2711 2711
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ctx."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(_("HG: Enter commit message."
                   " Lines beginning with 'HG:' are removed."))
    lines.append("HG: %s" % extramsg)
    lines.append("HG: --")
    lines.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        lines.append(_("HG: branch merge"))
    if ctx.branch():
        lines.append(_("HG: branch '%s'") % ctx.branch())
    if bookmarks.iscurrent(repo):
        lines.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
    lines.extend(_("HG: subrepo %s") % s for s in subs)
    lines.extend(_("HG: added %s") % f for f in added)
    lines.extend(_("HG: changed %s") % f for f in modified)
    lines.extend(_("HG: removed %s") % f for f in removed)
    if not (added or modified or removed):
        lines.append(_("HG: no files changed"))
    lines.append("")

    return "\n".join(lines)
2739 2739
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print post-commit status messages for the new changeset 'node'.

    Emits 'created new head' / 'reopening closed branch head' notes and,
    in verbose/debug mode, the committed changeset id.  The mutable
    default argument (opts={}) was replaced by the None-sentinel idiom;
    callers passing a dict are unaffected.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2785 2785
2786 2786 def revert(ui, repo, ctx, parents, *pats, **opts):
2787 2787 parent, p2 = parents
2788 2788 node = ctx.node()
2789 2789
2790 2790 mf = ctx.manifest()
2791 2791 if node == p2:
2792 2792 parent = p2
2793 2793 if node == parent:
2794 2794 pmf = mf
2795 2795 else:
2796 2796 pmf = None
2797 2797
2798 2798 # need all matching names in dirstate and manifest of target rev,
2799 2799 # so have to walk both. do not print errors if files exist in one
2800 2800 # but not other.
2801 2801
2802 2802 # `names` is a mapping for all elements in working copy and target revision
2803 2803 # The mapping is in the form:
2804 2804 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2805 2805 names = {}
2806 2806
2807 2807 wlock = repo.wlock()
2808 2808 try:
2809 2809 ## filling of the `names` mapping
2810 2810 # walk dirstate to fill `names`
2811 2811
2812 2812 m = scmutil.match(repo[None], pats, opts)
2813 2813 if not m.always() or node != parent:
2814 2814 m.bad = lambda x, y: False
2815 2815 for abs in repo.walk(m):
2816 2816 names[abs] = m.rel(abs), m.exact(abs)
2817 2817
2818 2818 # walk target manifest to fill `names`
2819 2819
2820 2820 def badfn(path, msg):
2821 2821 if path in names:
2822 2822 return
2823 2823 if path in ctx.substate:
2824 2824 return
2825 2825 path_ = path + '/'
2826 2826 for f in names:
2827 2827 if f.startswith(path_):
2828 2828 return
2829 2829 ui.warn("%s: %s\n" % (m.rel(path), msg))
2830 2830
2831 m = scmutil.match(ctx, pats, opts)
2832 2831 m.bad = badfn
2833 2832 for abs in ctx.walk(m):
2834 2833 if abs not in names:
2835 2834 names[abs] = m.rel(abs), m.exact(abs)
2836 2835
2837 2836 # Find status of all file in `names`.
2838 2837 m = scmutil.matchfiles(repo, names)
2839 2838
2840 2839 changes = repo.status(node1=node, match=m,
2841 2840 unknown=True, ignored=True, clean=True)
2842 2841 else:
2843 2842 changes = repo.status(match=m)
2844 2843 for kind in changes:
2845 2844 for abs in kind:
2846 2845 names[abs] = m.rel(abs), m.exact(abs)
2847 2846
2848 2847 m = scmutil.matchfiles(repo, names)
2849 2848
2850 2849 modified = set(changes.modified)
2851 2850 added = set(changes.added)
2852 2851 removed = set(changes.removed)
2853 2852 _deleted = set(changes.deleted)
2854 2853 unknown = set(changes.unknown)
2855 2854 unknown.update(changes.ignored)
2856 2855 clean = set(changes.clean)
2857 2856 modadded = set()
2858 2857
2859 2858 # split between files known in target manifest and the others
2860 2859 smf = set(mf)
2861 2860
2862 2861 # determine the exact nature of the deleted changesets
2863 2862 deladded = _deleted - smf
2864 2863 deleted = _deleted - deladded
2865 2864
2866 2865 # We need to account for the state of the file in the dirstate,
2867 2866 # even when we revert against something else than parent. This will
2868 2867 # slightly alter the behavior of revert (doing back up or not, delete
2869 2868 # or just forget etc).
2870 2869 if parent == node:
2871 2870 dsmodified = modified
2872 2871 dsadded = added
2873 2872 dsremoved = removed
2874 2873 # store all local modifications, useful later for rename detection
2875 2874 localchanges = dsmodified | dsadded
2876 2875 modified, added, removed = set(), set(), set()
2877 2876 else:
2878 2877 changes = repo.status(node1=parent, match=m)
2879 2878 dsmodified = set(changes.modified)
2880 2879 dsadded = set(changes.added)
2881 2880 dsremoved = set(changes.removed)
2882 2881 # store all local modifications, useful later for rename detection
2883 2882 localchanges = dsmodified | dsadded
2884 2883
2885 2884 # only take into account for removes between wc and target
2886 2885 clean |= dsremoved - removed
2887 2886 dsremoved &= removed
2888 2887 # distinct between dirstate remove and other
2889 2888 removed -= dsremoved
2890 2889
2891 2890 modadded = added & dsmodified
2892 2891 added -= modadded
2893 2892
2894 2893 # tell newly modified apart.
2895 2894 dsmodified &= modified
2896 2895 dsmodified |= modified & dsadded # dirstate added may needs backup
2897 2896 modified -= dsmodified
2898 2897
2899 2898 # We need to wait for some post-processing to update this set
2900 2899 # before making the distinction. The dirstate will be used for
2901 2900 # that purpose.
2902 2901 dsadded = added
2903 2902
2904 2903 # in case of merge, files that are actually added can be reported as
2905 2904 # modified, we need to post process the result
2906 2905 if p2 != nullid:
2907 2906 if pmf is None:
2908 2907 # only need parent manifest in the merge case,
2909 2908 # so do not read by default
2910 2909 pmf = repo[parent].manifest()
2911 2910 mergeadd = dsmodified - set(pmf)
2912 2911 dsadded |= mergeadd
2913 2912 dsmodified -= mergeadd
2914 2913
2915 2914 # if f is a rename, update `names` to also revert the source
2916 2915 cwd = repo.getcwd()
2917 2916 for f in localchanges:
2918 2917 src = repo.dirstate.copied(f)
2919 2918 # XXX should we check for rename down to target node?
2920 2919 if src and src not in names and repo.dirstate[src] == 'r':
2921 2920 dsremoved.add(src)
2922 2921 names[src] = (repo.pathto(src, cwd), True)
2923 2922
2924 2923 # distinguish between file to forget and the other
2925 2924 added = set()
2926 2925 for abs in dsadded:
2927 2926 if repo.dirstate[abs] != 'a':
2928 2927 added.add(abs)
2929 2928 dsadded -= added
2930 2929
2931 2930 for abs in deladded:
2932 2931 if repo.dirstate[abs] == 'a':
2933 2932 dsadded.add(abs)
2934 2933 deladded -= dsadded
2935 2934
2936 2935 # For files marked as removed, we check if an unknown file is present at
2937 2936 # the same path. If a such file exists it may need to be backed up.
2938 2937 # Making the distinction at this stage helps have simpler backup
2939 2938 # logic.
2940 2939 removunk = set()
2941 2940 for abs in removed:
2942 2941 target = repo.wjoin(abs)
2943 2942 if os.path.lexists(target):
2944 2943 removunk.add(abs)
2945 2944 removed -= removunk
2946 2945
2947 2946 dsremovunk = set()
2948 2947 for abs in dsremoved:
2949 2948 target = repo.wjoin(abs)
2950 2949 if os.path.lexists(target):
2951 2950 dsremovunk.add(abs)
2952 2951 dsremoved -= dsremovunk
2953 2952
2954 2953 # action to be actually performed by revert
2955 2954 # (<list of file>, message>) tuple
2956 2955 actions = {'revert': ([], _('reverting %s\n')),
2957 2956 'add': ([], _('adding %s\n')),
2958 2957 'remove': ([], _('removing %s\n')),
2959 2958 'drop': ([], _('removing %s\n')),
2960 2959 'forget': ([], _('forgetting %s\n')),
2961 2960 'undelete': ([], _('undeleting %s\n')),
2962 2961 'noop': (None, _('no changes needed to %s\n')),
2963 2962 'unknown': (None, _('file not managed: %s\n')),
2964 2963 }
2965 2964
2966 2965 # "constant" that convey the backup strategy.
2967 2966 # All set to `discard` if `no-backup` is set do avoid checking
2968 2967 # no_backup lower in the code.
2969 2968 # These values are ordered for comparison purposes
2970 2969 backup = 2 # unconditionally do backup
2971 2970 check = 1 # check if the existing file differs from target
2972 2971 discard = 0 # never do backup
2973 2972 if opts.get('no_backup'):
2974 2973 backup = check = discard
2975 2974
2976 2975 backupanddel = actions['remove']
2977 2976 if not opts.get('no_backup'):
2978 2977 backupanddel = actions['drop']
2979 2978
2980 2979 disptable = (
2981 2980 # dispatch table:
2982 2981 # file state
2983 2982 # action
2984 2983 # make backup
2985 2984
2986 2985 ## Sets that results that will change file on disk
2987 2986 # Modified compared to target, no local change
2988 2987 (modified, actions['revert'], discard),
2989 2988 # Modified compared to target, but local file is deleted
2990 2989 (deleted, actions['revert'], discard),
2991 2990 # Modified compared to target, local change
2992 2991 (dsmodified, actions['revert'], backup),
2993 2992 # Added since target
2994 2993 (added, actions['remove'], discard),
2995 2994 # Added in working directory
2996 2995 (dsadded, actions['forget'], discard),
2997 2996 # Added since target, have local modification
2998 2997 (modadded, backupanddel, backup),
2999 2998 # Added since target but file is missing in working directory
3000 2999 (deladded, actions['drop'], discard),
3001 3000 # Removed since target, before working copy parent
3002 3001 (removed, actions['add'], discard),
3003 3002 # Same as `removed` but an unknown file exists at the same path
3004 3003 (removunk, actions['add'], check),
3005 3004 # Removed since targe, marked as such in working copy parent
3006 3005 (dsremoved, actions['undelete'], discard),
3007 3006 # Same as `dsremoved` but an unknown file exists at the same path
3008 3007 (dsremovunk, actions['undelete'], check),
3009 3008 ## the following sets does not result in any file changes
3010 3009 # File with no modification
3011 3010 (clean, actions['noop'], discard),
3012 3011 # Existing file, not tracked anywhere
3013 3012 (unknown, actions['unknown'], discard),
3014 3013 )
3015 3014
3016 3015 wctx = repo[None]
3017 3016 for abs, (rel, exact) in sorted(names.items()):
3018 3017 # target file to be touch on disk (relative to cwd)
3019 3018 target = repo.wjoin(abs)
3020 3019 # search the entry in the dispatch table.
3021 3020 # if the file is in any of these sets, it was touched in the working
3022 3021 # directory parent and we are sure it needs to be reverted.
3023 3022 for table, (xlist, msg), dobackup in disptable:
3024 3023 if abs not in table:
3025 3024 continue
3026 3025 if xlist is not None:
3027 3026 xlist.append(abs)
3028 3027 if dobackup and (backup <= dobackup
3029 3028 or wctx[abs].cmp(ctx[abs])):
3030 3029 bakname = "%s.orig" % rel
3031 3030 ui.note(_('saving current version of %s as %s\n') %
3032 3031 (rel, bakname))
3033 3032 if not opts.get('dry_run'):
3034 3033 util.rename(target, bakname)
3035 3034 if ui.verbose or not exact:
3036 3035 if not isinstance(msg, basestring):
3037 3036 msg = msg(abs)
3038 3037 ui.status(msg % rel)
3039 3038 elif exact:
3040 3039 ui.warn(msg % rel)
3041 3040 break
3042 3041
3043 3042
3044 3043 if not opts.get('dry_run'):
3045 3044 needdata = ('revert', 'add', 'undelete')
3046 3045 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3047 3046 interactive = opts.get('interactive', False)
3048 3047 _performrevert(repo, parents, ctx, actions, interactive)
3049 3048
3050 3049 # get the list of subrepos that must be reverted
3051 3050 subrepomatch = scmutil.match(ctx, pats, opts)
3052 3051 targetsubs = sorted(s for s in ctx.substate if subrepomatch(s))
3053 3052
3054 3053 if targetsubs:
3055 3054 # Revert the subrepos on the revert list
3056 3055 for sub in targetsubs:
3057 3056 ctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3058 3057 finally:
3059 3058 wlock.release()
3060 3059
3061 3060 def _revertprefetch(repo, ctx, *files):
3062 3061 """Let extension changing the storage layer prefetch content"""
3063 3062 pass
3064 3063
3065 3064 def _performrevert(repo, parents, ctx, actions, interactive=False):
3066 3065 """function that actually perform all the actions computed for revert
3067 3066
3068 3067 This is an independent function to let extension to plug in and react to
3069 3068 the imminent revert.
3070 3069
3071 3070 Make sure you have the working directory locked when calling this function.
3072 3071 """
3073 3072 parent, p2 = parents
3074 3073 node = ctx.node()
3075 3074 def checkout(f):
3076 3075 fc = ctx[f]
3077 3076 repo.wwrite(f, fc.data(), fc.flags())
3078 3077
3079 3078 audit_path = pathutil.pathauditor(repo.root)
3080 3079 for f in actions['forget'][0]:
3081 3080 repo.dirstate.drop(f)
3082 3081 for f in actions['remove'][0]:
3083 3082 audit_path(f)
3084 3083 util.unlinkpath(repo.wjoin(f))
3085 3084 repo.dirstate.remove(f)
3086 3085 for f in actions['drop'][0]:
3087 3086 audit_path(f)
3088 3087 repo.dirstate.remove(f)
3089 3088
3090 3089 normal = None
3091 3090 if node == parent:
3092 3091 # We're reverting to our parent. If possible, we'd like status
3093 3092 # to report the file as clean. We have to use normallookup for
3094 3093 # merges to avoid losing information about merged/dirty files.
3095 3094 if p2 != nullid:
3096 3095 normal = repo.dirstate.normallookup
3097 3096 else:
3098 3097 normal = repo.dirstate.normal
3099 3098
3100 3099 if interactive:
3101 3100 # Prompt the user for changes to revert
3102 3101 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3103 3102 m = scmutil.match(ctx, torevert, {})
3104 3103 diff = patch.diff(repo, None, ctx.node(), m)
3105 3104 originalchunks = patch.parsepatch(diff)
3106 3105 try:
3107 3106 chunks = recordfilter(repo.ui, originalchunks)
3108 3107 except patch.PatchError, err:
3109 3108 raise util.Abort(_('error parsing patch: %s') % err)
3110 3109
3111 3110 # Apply changes
3112 3111 fp = cStringIO.StringIO()
3113 3112 for c in chunks:
3114 3113 c.write(fp)
3115 3114 dopatch = fp.tell()
3116 3115 fp.seek(0)
3117 3116 if dopatch:
3118 3117 try:
3119 3118 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3120 3119 except patch.PatchError, err:
3121 3120 raise util.Abort(str(err))
3122 3121 del fp
3123 3122
3124 3123 for f in actions['revert'][0]:
3125 3124 if normal:
3126 3125 normal(f)
3127 3126
3128 3127 else:
3129 3128 for f in actions['revert'][0]:
3130 3129 checkout(f)
3131 3130 if normal:
3132 3131 normal(f)
3133 3132
3134 3133 for f in actions['add'][0]:
3135 3134 checkout(f)
3136 3135 repo.dirstate.add(f)
3137 3136
3138 3137 normal = repo.dirstate.normallookup
3139 3138 if node == parent and p2 == nullid:
3140 3139 normal = repo.dirstate.normal
3141 3140 for f in actions['undelete'][0]:
3142 3141 checkout(f)
3143 3142 normal(f)
3144 3143
3145 3144 copied = copies.pathcopies(repo[parent], ctx)
3146 3145
3147 3146 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3148 3147 if f in copied:
3149 3148 repo.dirstate.copy(copied[f], f)
3150 3149
3151 3150 def command(table):
3152 3151 """Returns a function object to be used as a decorator for making commands.
3153 3152
3154 3153 This function receives a command table as its argument. The table should
3155 3154 be a dict.
3156 3155
3157 3156 The returned function can be used as a decorator for adding commands
3158 3157 to that command table. This function accepts multiple arguments to define
3159 3158 a command.
3160 3159
3161 3160 The first argument is the command name.
3162 3161
3163 3162 The options argument is an iterable of tuples defining command arguments.
3164 3163 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3165 3164
3166 3165 The synopsis argument defines a short, one line summary of how to use the
3167 3166 command. This shows up in the help output.
3168 3167
3169 3168 The norepo argument defines whether the command does not require a
3170 3169 local repository. Most commands operate against a repository, thus the
3171 3170 default is False.
3172 3171
3173 3172 The optionalrepo argument defines whether the command optionally requires
3174 3173 a local repository.
3175 3174
3176 3175 The inferrepo argument defines whether to try to find a repository from the
3177 3176 command line arguments. If True, arguments will be examined for potential
3178 3177 repository locations. See ``findrepo()``. If a repository is found, it
3179 3178 will be used.
3180 3179 """
3181 3180 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3182 3181 inferrepo=False):
3183 3182 def decorator(func):
3184 3183 if synopsis:
3185 3184 table[name] = func, list(options), synopsis
3186 3185 else:
3187 3186 table[name] = func, list(options)
3188 3187
3189 3188 if norepo:
3190 3189 # Avoid import cycle.
3191 3190 import commands
3192 3191 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3193 3192
3194 3193 if optionalrepo:
3195 3194 import commands
3196 3195 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3197 3196
3198 3197 if inferrepo:
3199 3198 import commands
3200 3199 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3201 3200
3202 3201 return func
3203 3202 return decorator
3204 3203
3205 3204 return cmd
3206 3205
3207 3206 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3208 3207 # commands.outgoing. "missing" is "missing" of the result of
3209 3208 # "findcommonoutgoing()"
3210 3209 outgoinghooks = util.hooks()
3211 3210
3212 3211 # a list of (ui, repo) functions called by commands.summary
3213 3212 summaryhooks = util.hooks()
3214 3213
3215 3214 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3216 3215 #
3217 3216 # functions should return tuple of booleans below, if 'changes' is None:
3218 3217 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3219 3218 #
3220 3219 # otherwise, 'changes' is a tuple of tuples below:
3221 3220 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3222 3221 # - (desturl, destbranch, destpeer, outgoing)
3223 3222 summaryremotehooks = util.hooks()
3224 3223
3225 3224 # A list of state files kept by multistep operations like graft.
3226 3225 # Since graft cannot be aborted, it is considered 'clearable' by update.
3227 3226 # note: bisect is intentionally excluded
3228 3227 # (state file, clearable, allowcommit, error, hint)
3229 3228 unfinishedstates = [
3230 3229 ('graftstate', True, False, _('graft in progress'),
3231 3230 _("use 'hg graft --continue' or 'hg update' to abort")),
3232 3231 ('updatestate', True, False, _('last update was interrupted'),
3233 3232 _("use 'hg update' to get a consistent checkout"))
3234 3233 ]
3235 3234
3236 3235 def checkunfinished(repo, commit=False):
3237 3236 '''Look for an unfinished multistep operation, like graft, and abort
3238 3237 if found. It's probably good to check this right before
3239 3238 bailifchanged().
3240 3239 '''
3241 3240 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3242 3241 if commit and allowcommit:
3243 3242 continue
3244 3243 if repo.vfs.exists(f):
3245 3244 raise util.Abort(msg, hint=hint)
3246 3245
3247 3246 def clearunfinished(repo):
3248 3247 '''Check for unfinished operations (as above), and clear the ones
3249 3248 that are clearable.
3250 3249 '''
3251 3250 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3252 3251 if not clearable and repo.vfs.exists(f):
3253 3252 raise util.Abort(msg, hint=hint)
3254 3253 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3255 3254 if clearable and repo.vfs.exists(f):
3256 3255 util.unlink(repo.join(f))
@@ -1,181 +1,176 b''
1 1 $ hg init
2 2
3 3 Set up history and working copy
4 4
5 5 $ python $TESTDIR/generate-working-copy-states.py state 2 1
6 6 $ hg addremove -q --similarity 0
7 7 $ hg commit -m first
8 8
9 9 $ python $TESTDIR/generate-working-copy-states.py state 2 2
10 10 $ hg addremove -q --similarity 0
11 11 $ hg commit -m second
12 12
13 13 $ python $TESTDIR/generate-working-copy-states.py state 2 wc
14 14 $ hg addremove -q --similarity 0
15 15 $ hg forget *_*_*-untracked
16 16 $ rm *_*_missing-*
17 17
18 18 Test status
19 19
20 20 $ hg st -A 'set:modified()'
21 21 M content1_content1_content3-tracked
22 22 M content1_content2_content1-tracked
23 23 M content1_content2_content3-tracked
24 24 M missing_content2_content3-tracked
25 25
26 26 $ hg st -A 'set:added()'
27 27 A content1_missing_content1-tracked
28 28 A content1_missing_content3-tracked
29 29 A missing_missing_content3-tracked
30 30
31 31 $ hg st -A 'set:removed()'
32 32 R content1_content1_content1-untracked
33 33 R content1_content1_content3-untracked
34 34 R content1_content1_missing-untracked
35 35 R content1_content2_content1-untracked
36 36 R content1_content2_content2-untracked
37 37 R content1_content2_content3-untracked
38 38 R content1_content2_missing-untracked
39 39 R missing_content2_content2-untracked
40 40 R missing_content2_content3-untracked
41 41 R missing_content2_missing-untracked
42 42
43 43 $ hg st -A 'set:deleted()'
44 44 ! content1_content1_missing-tracked
45 45 ! content1_content2_missing-tracked
46 46 ! content1_missing_missing-tracked
47 47 ! missing_content2_missing-tracked
48 48 ! missing_missing_missing-tracked
49 49
50 50 $ hg st -A 'set:unknown()'
51 51 ? content1_missing_content1-untracked
52 52 ? content1_missing_content3-untracked
53 53 ? missing_missing_content3-untracked
54 54
55 55 $ hg st -A 'set:clean()'
56 56 C content1_content1_content1-tracked
57 57 C content1_content2_content2-tracked
58 58 C missing_content2_content2-tracked
59 59
60 60 Test log
61 61
62 62 $ hg log -T '{rev}\n' --stat 'set:modified()'
63 63 1
64 64 content1_content2_content1-tracked | 2 +-
65 65 content1_content2_content3-tracked | 2 +-
66 66 missing_content2_content3-tracked | 1 +
67 67 3 files changed, 3 insertions(+), 2 deletions(-)
68 68
69 69 0
70 70 content1_content1_content3-tracked | 1 +
71 71 content1_content2_content1-tracked | 1 +
72 72 content1_content2_content3-tracked | 1 +
73 73 3 files changed, 3 insertions(+), 0 deletions(-)
74 74
75 75 $ hg log -T '{rev}\n' --stat 'set:added()'
76 76 1
77 77 content1_missing_content1-tracked | 1 -
78 78 content1_missing_content3-tracked | 1 -
79 79 2 files changed, 0 insertions(+), 2 deletions(-)
80 80
81 81 0
82 82 content1_missing_content1-tracked | 1 +
83 83 content1_missing_content3-tracked | 1 +
84 84 2 files changed, 2 insertions(+), 0 deletions(-)
85 85
86 86 $ hg log -T '{rev}\n' --stat 'set:removed()'
87 87 1
88 88 content1_content2_content1-untracked | 2 +-
89 89 content1_content2_content2-untracked | 2 +-
90 90 content1_content2_content3-untracked | 2 +-
91 91 content1_content2_missing-untracked | 2 +-
92 92 missing_content2_content2-untracked | 1 +
93 93 missing_content2_content3-untracked | 1 +
94 94 missing_content2_missing-untracked | 1 +
95 95 7 files changed, 7 insertions(+), 4 deletions(-)
96 96
97 97 0
98 98 content1_content1_content1-untracked | 1 +
99 99 content1_content1_content3-untracked | 1 +
100 100 content1_content1_missing-untracked | 1 +
101 101 content1_content2_content1-untracked | 1 +
102 102 content1_content2_content2-untracked | 1 +
103 103 content1_content2_content3-untracked | 1 +
104 104 content1_content2_missing-untracked | 1 +
105 105 7 files changed, 7 insertions(+), 0 deletions(-)
106 106
107 107 $ hg log -T '{rev}\n' --stat 'set:deleted()'
108 108 1
109 109 content1_content2_missing-tracked | 2 +-
110 110 content1_missing_missing-tracked | 1 -
111 111 missing_content2_missing-tracked | 1 +
112 112 3 files changed, 2 insertions(+), 2 deletions(-)
113 113
114 114 0
115 115 content1_content1_missing-tracked | 1 +
116 116 content1_content2_missing-tracked | 1 +
117 117 content1_missing_missing-tracked | 1 +
118 118 3 files changed, 3 insertions(+), 0 deletions(-)
119 119
120 120 $ hg log -T '{rev}\n' --stat 'set:unknown()'
121 121 1
122 122 content1_missing_content1-untracked | 1 -
123 123 content1_missing_content3-untracked | 1 -
124 124 2 files changed, 0 insertions(+), 2 deletions(-)
125 125
126 126 0
127 127 content1_missing_content1-untracked | 1 +
128 128 content1_missing_content3-untracked | 1 +
129 129 2 files changed, 2 insertions(+), 0 deletions(-)
130 130
131 131 $ hg log -T '{rev}\n' --stat 'set:clean()'
132 132 1
133 133 content1_content2_content2-tracked | 2 +-
134 134 missing_content2_content2-tracked | 1 +
135 135 2 files changed, 2 insertions(+), 1 deletions(-)
136 136
137 137 0
138 138 content1_content1_content1-tracked | 1 +
139 139 content1_content2_content2-tracked | 1 +
140 140 2 files changed, 2 insertions(+), 0 deletions(-)
141 141
142 142 Test revert
143 143
144 BROKEN: the files that get undeleted were not modified, they were removed,
145 and content1_content2_missing-tracked was also not modified, it was deleted
146
147 144 $ hg revert 'set:modified()'
148 145 reverting content1_content1_content3-tracked
149 146 reverting content1_content2_content1-tracked
150 undeleting content1_content2_content1-untracked
151 undeleting content1_content2_content2-untracked
152 147 reverting content1_content2_content3-tracked
153 undeleting content1_content2_content3-untracked
154 reverting content1_content2_missing-tracked
155 undeleting content1_content2_missing-untracked
156 148 reverting missing_content2_content3-tracked
157 149
158 BROKEN: only the files that get forgotten are correct
159
160 150 $ hg revert 'set:added()'
161 151 forgetting content1_missing_content1-tracked
162 152 forgetting content1_missing_content3-tracked
163 undeleting missing_content2_content2-untracked
164 undeleting missing_content2_content3-untracked
165 reverting missing_content2_missing-tracked
166 undeleting missing_content2_missing-untracked
167 153 forgetting missing_missing_content3-tracked
168 154
169 155 $ hg revert 'set:removed()'
170 156 undeleting content1_content1_content1-untracked
171 157 undeleting content1_content1_content3-untracked
172 158 undeleting content1_content1_missing-untracked
159 undeleting content1_content2_content1-untracked
160 undeleting content1_content2_content2-untracked
161 undeleting content1_content2_content3-untracked
162 undeleting content1_content2_missing-untracked
163 undeleting missing_content2_content2-untracked
164 undeleting missing_content2_content3-untracked
165 undeleting missing_content2_missing-untracked
173 166
174 167 $ hg revert 'set:deleted()'
175 168 reverting content1_content1_missing-tracked
169 reverting content1_content2_missing-tracked
176 170 forgetting content1_missing_missing-tracked
171 reverting missing_content2_missing-tracked
177 172 forgetting missing_missing_missing-tracked
178 173
179 174 $ hg revert 'set:unknown()'
180 175
181 176 $ hg revert 'set:clean()'
General Comments 0
You need to be logged in to leave comments. Login now