log: prefer 'wctx' over 'pctx' for working context
Martin von Zweigbergk
r24534:1925769b default
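The functional change in this commit is the two-line rename in overridemakelogfilematcher (old lines 379-380 of the first file, which from its docstring is the largefiles overrides module): repo[None] is the working directory context, so the variable is named wctx rather than pctx, which conventionally denotes a parent changeset context. A minimal sketch of the renamed helper as it reads after this commit, assuming the enclosing overridelog() scope that supplies oldmatchandpats:

    # Sketch of the post-change helper; oldmatchandpats comes from the
    # enclosing overridelog() via installmatchandpatsfn().
    def overridemakelogfilematcher(repo, pats, opts):
        wctx = repo[None]                       # working context, hence 'wctx'
        match, pats = oldmatchandpats(wctx, pats, opts)
        return lambda rev: match                # same matcher for every revision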
@@ -1,1376 +1,1376 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18
19 19 import lfutil
20 20 import lfcommands
21 21 import basestore
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def composelargefilematcher(match, manifest):
26 26 '''create a matcher that matches only the largefiles in the original
27 27 matcher'''
28 28 m = copy.copy(match)
29 29 lfile = lambda f: lfutil.standin(f) in manifest
30 30 m._files = filter(lfile, m._files)
31 31 m._fmap = set(m._files)
32 32 m._always = False
33 33 origmatchfn = m.matchfn
34 34 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
35 35 return m
36 36
37 37 def composenormalfilematcher(match, manifest, exclude=None):
38 38 excluded = set()
39 39 if exclude is not None:
40 40 excluded.update(exclude)
41 41
42 42 m = copy.copy(match)
43 43 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
44 44 manifest or f in excluded)
45 45 m._files = filter(notlfile, m._files)
46 46 m._fmap = set(m._files)
47 47 m._always = False
48 48 origmatchfn = m.matchfn
49 49 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
50 50 return m
51 51
52 52 def installnormalfilesmatchfn(manifest):
53 53 '''installmatchfn with a matchfn that ignores all largefiles'''
54 54 def overridematch(ctx, pats=[], opts={}, globbed=False,
55 55 default='relpath'):
56 56 match = oldmatch(ctx, pats, opts, globbed, default)
57 57 return composenormalfilematcher(match, manifest)
58 58 oldmatch = installmatchfn(overridematch)
59 59
60 60 def installmatchfn(f):
61 61 '''monkey patch the scmutil module with a custom match function.
62 62 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
63 63 oldmatch = scmutil.match
64 64 setattr(f, 'oldmatch', oldmatch)
65 65 scmutil.match = f
66 66 return oldmatch
67 67
68 68 def restorematchfn():
69 69 '''restores scmutil.match to what it was before installmatchfn
70 70 was called. no-op if scmutil.match is its original function.
71 71
72 72 Note that n calls to installmatchfn will require n calls to
73 73 restore the original matchfn.'''
74 74 scmutil.match = getattr(scmutil.match, 'oldmatch')
75 75
76 76 def installmatchandpatsfn(f):
77 77 oldmatchandpats = scmutil.matchandpats
78 78 setattr(f, 'oldmatchandpats', oldmatchandpats)
79 79 scmutil.matchandpats = f
80 80 return oldmatchandpats
81 81
82 82 def restorematchandpatsfn():
83 83 '''restores scmutil.matchandpats to what it was before
84 84 installmatchandpatsfn was called. No-op if scmutil.matchandpats
85 85 is its original function.
86 86
87 87 Note that n calls to installmatchandpatsfn will require n calls
88 88 to restore the original matchfn.'''
89 89 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
90 90 scmutil.matchandpats)
91 91
92 92 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
93 93 large = opts.get('large')
94 94 lfsize = lfutil.getminsize(
95 95 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
96 96
97 97 lfmatcher = None
98 98 if lfutil.islfilesrepo(repo):
99 99 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
100 100 if lfpats:
101 101 lfmatcher = match_.match(repo.root, '', list(lfpats))
102 102
103 103 lfnames = []
104 104 m = copy.copy(matcher)
105 105 m.bad = lambda x, y: None
106 106 wctx = repo[None]
107 107 for f in repo.walk(m):
108 108 exact = m.exact(f)
109 109 lfile = lfutil.standin(f) in wctx
110 110 nfile = f in wctx
111 111 exists = lfile or nfile
112 112
113 113 # addremove in core gets fancy with the name, add doesn't
114 114 if isaddremove:
115 115 name = m.uipath(f)
116 116 else:
117 117 name = m.rel(f)
118 118
119 119 # Don't warn the user when they attempt to add a normal tracked file.
120 120 # The normal add code will do that for us.
121 121 if exact and exists:
122 122 if lfile:
123 123 ui.warn(_('%s already a largefile\n') % name)
124 124 continue
125 125
126 126 if (exact or not exists) and not lfutil.isstandin(f):
127 127 # In case the file was removed previously, but not committed
128 128 # (issue3507)
129 129 if not repo.wvfs.exists(f):
130 130 continue
131 131
132 132 abovemin = (lfsize and
133 133 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
134 134 if large or abovemin or (lfmatcher and lfmatcher(f)):
135 135 lfnames.append(f)
136 136 if ui.verbose or not exact:
137 137 ui.status(_('adding %s as a largefile\n') % name)
138 138
139 139 bad = []
140 140
141 141 # Need to lock, otherwise there could be a race condition between
142 142 # when standins are created and added to the repo.
143 143 wlock = repo.wlock()
144 144 try:
145 145 if not opts.get('dry_run'):
146 146 standins = []
147 147 lfdirstate = lfutil.openlfdirstate(ui, repo)
148 148 for f in lfnames:
149 149 standinname = lfutil.standin(f)
150 150 lfutil.writestandin(repo, standinname, hash='',
151 151 executable=lfutil.getexecutable(repo.wjoin(f)))
152 152 standins.append(standinname)
153 153 if lfdirstate[f] == 'r':
154 154 lfdirstate.normallookup(f)
155 155 else:
156 156 lfdirstate.add(f)
157 157 lfdirstate.write()
158 158 bad += [lfutil.splitstandin(f)
159 159 for f in repo[None].add(standins)
160 160 if f in m.files()]
161 161
162 162 added = [f for f in lfnames if f not in bad]
163 163 finally:
164 164 wlock.release()
165 165 return added, bad
166 166
167 167 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
168 168 after = opts.get('after')
169 169 m = composelargefilematcher(matcher, repo[None].manifest())
170 170 try:
171 171 repo.lfstatus = True
172 172 s = repo.status(match=m, clean=not isaddremove)
173 173 finally:
174 174 repo.lfstatus = False
175 175 manifest = repo[None].manifest()
176 176 modified, added, deleted, clean = [[f for f in list
177 177 if lfutil.standin(f) in manifest]
178 178 for list in (s.modified, s.added,
179 179 s.deleted, s.clean)]
180 180
181 181 def warn(files, msg):
182 182 for f in files:
183 183 ui.warn(msg % m.rel(f))
184 184 return int(len(files) > 0)
185 185
186 186 result = 0
187 187
188 188 if after:
189 189 remove = deleted
190 190 result = warn(modified + added + clean,
191 191 _('not removing %s: file still exists\n'))
192 192 else:
193 193 remove = deleted + clean
194 194 result = warn(modified, _('not removing %s: file is modified (use -f'
195 195 ' to force removal)\n'))
196 196 result = warn(added, _('not removing %s: file has been marked for add'
197 197 ' (use forget to undo)\n')) or result
198 198
199 199 # Need to lock because standin files are deleted then removed from the
200 200 # repository and we could race in-between.
201 201 wlock = repo.wlock()
202 202 try:
203 203 lfdirstate = lfutil.openlfdirstate(ui, repo)
204 204 for f in sorted(remove):
205 205 if ui.verbose or not m.exact(f):
206 206 # addremove in core gets fancy with the name, remove doesn't
207 207 if isaddremove:
208 208 name = m.uipath(f)
209 209 else:
210 210 name = m.rel(f)
211 211 ui.status(_('removing %s\n') % name)
212 212
213 213 if not opts.get('dry_run'):
214 214 if not after:
215 215 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
216 216
217 217 if opts.get('dry_run'):
218 218 return result
219 219
220 220 remove = [lfutil.standin(f) for f in remove]
221 221 # If this is being called by addremove, let the original addremove
222 222 # function handle this.
223 223 if not isaddremove:
224 224 for f in remove:
225 225 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
226 226 repo[None].forget(remove)
227 227
228 228 for f in remove:
229 229 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
230 230 False)
231 231
232 232 lfdirstate.write()
233 233 finally:
234 234 wlock.release()
235 235
236 236 return result
237 237
238 238 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 239 # appear at their right place in the manifests.
240 240 def decodepath(orig, path):
241 241 return lfutil.splitstandin(path) or path
242 242
243 243 # -- Wrappers: modify existing commands --------------------------------
244 244
245 245 def overrideadd(orig, ui, repo, *pats, **opts):
246 246 if opts.get('normal') and opts.get('large'):
247 247 raise util.Abort(_('--normal cannot be used with --large'))
248 248 return orig(ui, repo, *pats, **opts)
249 249
250 250 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
251 251 # The --normal flag short circuits this override
252 252 if opts.get('normal'):
253 253 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
254 254
255 255 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
256 256 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
257 257 ladded)
258 258 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
259 259
260 260 bad.extend(f for f in lbad)
261 261 return bad
262 262
263 263 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
264 264 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
265 265 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
266 266 return removelargefiles(ui, repo, False, matcher, after=after,
267 267 force=force) or result
268 268
269 269 def overridestatusfn(orig, repo, rev2, **opts):
270 270 try:
271 271 repo._repo.lfstatus = True
272 272 return orig(repo, rev2, **opts)
273 273 finally:
274 274 repo._repo.lfstatus = False
275 275
276 276 def overridestatus(orig, ui, repo, *pats, **opts):
277 277 try:
278 278 repo.lfstatus = True
279 279 return orig(ui, repo, *pats, **opts)
280 280 finally:
281 281 repo.lfstatus = False
282 282
283 283 def overridedirty(orig, repo, ignoreupdate=False):
284 284 try:
285 285 repo._repo.lfstatus = True
286 286 return orig(repo, ignoreupdate)
287 287 finally:
288 288 repo._repo.lfstatus = False
289 289
290 290 def overridelog(orig, ui, repo, *pats, **opts):
291 291 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
292 292 default='relpath'):
293 293 """Matcher that merges root directory with .hglf, suitable for log.
294 294 It is still possible to match .hglf directly.
295 295 For any listed files run log on the standin too.
296 296 matchfn tries both the given filename and with .hglf stripped.
297 297 """
298 298 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
299 299 m, p = copy.copy(matchandpats)
300 300
301 301 if m.always():
302 302 # We want to match everything anyway, so there's no benefit trying
303 303 # to add standins.
304 304 return matchandpats
305 305
306 306 pats = set(p)
307 307
308 308 def fixpats(pat, tostandin=lfutil.standin):
309 309 kindpat = match_._patsplit(pat, None)
310 310
311 311 if kindpat[0] is not None:
312 312 return kindpat[0] + ':' + tostandin(kindpat[1])
313 313 return tostandin(kindpat[1])
314 314
315 315 if m._cwd:
316 316 hglf = lfutil.shortname
317 317 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
318 318
319 319 def tostandin(f):
320 320 # The file may already be a standin, so truncate the back
321 321 # prefix and test before mangling it. This avoids turning
322 322 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
323 323 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
324 324 return f
325 325
326 326 # An absolute path is from outside the repo, so truncate the
327 327 # path to the root before building the standin. Otherwise cwd
328 328 # is somewhere in the repo, relative to root, and needs to be
329 329 # prepended before building the standin.
330 330 if os.path.isabs(m._cwd):
331 331 f = f[len(back):]
332 332 else:
333 333 f = m._cwd + '/' + f
334 334 return back + lfutil.standin(f)
335 335
336 336 pats.update(fixpats(f, tostandin) for f in p)
337 337 else:
338 338 def tostandin(f):
339 339 if lfutil.splitstandin(f):
340 340 return f
341 341 return lfutil.standin(f)
342 342 pats.update(fixpats(f, tostandin) for f in p)
343 343
344 344 for i in range(0, len(m._files)):
345 345 # Don't add '.hglf' to m.files, since that is already covered by '.'
346 346 if m._files[i] == '.':
347 347 continue
348 348 standin = lfutil.standin(m._files[i])
349 349 # If the "standin" is a directory, append instead of replace to
350 350 # support naming a directory on the command line with only
351 351 # largefiles. The original directory is kept to support normal
352 352 # files.
353 353 if standin in repo[ctx.node()]:
354 354 m._files[i] = standin
355 355 elif m._files[i] not in repo[ctx.node()] \
356 356 and repo.wvfs.isdir(standin):
357 357 m._files.append(standin)
358 358
359 359 m._fmap = set(m._files)
360 360 m._always = False
361 361 origmatchfn = m.matchfn
362 362 def lfmatchfn(f):
363 363 lf = lfutil.splitstandin(f)
364 364 if lf is not None and origmatchfn(lf):
365 365 return True
366 366 r = origmatchfn(f)
367 367 return r
368 368 m.matchfn = lfmatchfn
369 369
370 370 ui.debug('updated patterns: %s\n' % sorted(pats))
371 371 return m, pats
372 372
373 373 # For hg log --patch, the match object is used in two different senses:
374 374 # (1) to determine what revisions should be printed out, and
375 375 # (2) to determine what files to print out diffs for.
376 376 # The magic matchandpats override should be used for case (1) but not for
377 377 # case (2).
378 378 def overridemakelogfilematcher(repo, pats, opts):
379 pctx = repo[None]
380 match, pats = oldmatchandpats(pctx, pats, opts)
379 wctx = repo[None]
380 match, pats = oldmatchandpats(wctx, pats, opts)
381 381 return lambda rev: match
382 382
383 383 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
384 384 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
385 385 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
386 386
387 387 try:
388 388 return orig(ui, repo, *pats, **opts)
389 389 finally:
390 390 restorematchandpatsfn()
391 391 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
392 392
393 393 def overrideverify(orig, ui, repo, *pats, **opts):
394 394 large = opts.pop('large', False)
395 395 all = opts.pop('lfa', False)
396 396 contents = opts.pop('lfc', False)
397 397
398 398 result = orig(ui, repo, *pats, **opts)
399 399 if large or all or contents:
400 400 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
401 401 return result
402 402
403 403 def overridedebugstate(orig, ui, repo, *pats, **opts):
404 404 large = opts.pop('large', False)
405 405 if large:
406 406 class fakerepo(object):
407 407 dirstate = lfutil.openlfdirstate(ui, repo)
408 408 orig(ui, fakerepo, *pats, **opts)
409 409 else:
410 410 orig(ui, repo, *pats, **opts)
411 411
412 412 # Before starting the manifest merge, merge.updates will call
413 413 # _checkunknownfile to check if there are any files in the merged-in
414 414 # changeset that collide with unknown files in the working copy.
415 415 #
416 416 # The largefiles are seen as unknown, so this prevents us from merging
417 417 # in a file 'foo' if we already have a largefile with the same name.
418 418 #
419 419 # The overridden function filters the unknown files by removing any
420 420 # largefiles. This makes the merge proceed and we can then handle this
421 421 # case further in the overridden calculateupdates function below.
422 422 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
423 423 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
424 424 return False
425 425 return origfn(repo, wctx, mctx, f, f2)
426 426
427 427 # The manifest merge handles conflicts on the manifest level. We want
428 428 # to handle changes in largefile-ness of files at this level too.
429 429 #
430 430 # The strategy is to run the original calculateupdates and then process
431 431 # the action list it outputs. There are two cases we need to deal with:
432 432 #
433 433 # 1. Normal file in p1, largefile in p2. Here the largefile is
434 434 # detected via its standin file, which will enter the working copy
435 435 # with a "get" action. It is not "merge" since the standin is all
436 436 # Mercurial is concerned with at this level -- the link to the
437 437 # existing normal file is not relevant here.
438 438 #
439 439 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
440 440 # since the largefile will be present in the working copy and
441 441 # different from the normal file in p2. Mercurial therefore
442 442 # triggers a merge action.
443 443 #
444 444 # In both cases, we prompt the user and emit new actions to either
445 445 # remove the standin (if the normal file was kept) or to remove the
446 446 # normal file and get the standin (if the largefile was kept). The
447 447 # default prompt answer is to use the largefile version since it was
448 448 # presumably changed on purpose.
449 449 #
450 450 # Finally, the merge.applyupdates function will then take care of
451 451 # writing the files into the working copy and lfcommands.updatelfiles
452 452 # will update the largefiles.
453 453 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
454 454 partial, acceptremote, followcopies):
455 455 overwrite = force and not branchmerge
456 456 actions, diverge, renamedelete = origfn(
457 457 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
458 458 followcopies)
459 459
460 460 if overwrite:
461 461 return actions, diverge, renamedelete
462 462
463 463 # Convert to dictionary with filename as key and action as value.
464 464 lfiles = set()
465 465 for f in actions:
466 466 splitstandin = f and lfutil.splitstandin(f)
467 467 if splitstandin in p1:
468 468 lfiles.add(splitstandin)
469 469 elif lfutil.standin(f) in p1:
470 470 lfiles.add(f)
471 471
472 472 for lfile in lfiles:
473 473 standin = lfutil.standin(lfile)
474 474 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
475 475 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
476 476 if sm in ('g', 'dc') and lm != 'r':
477 477 # Case 1: normal file in the working copy, largefile in
478 478 # the second parent
479 479 usermsg = _('remote turned local normal file %s into a largefile\n'
480 480 'use (l)argefile or keep (n)ormal file?'
481 481 '$$ &Largefile $$ &Normal file') % lfile
482 482 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
483 483 actions[lfile] = ('r', None, 'replaced by standin')
484 484 actions[standin] = ('g', sargs, 'replaces standin')
485 485 else: # keep local normal file
486 486 actions[lfile] = ('k', None, 'replaces standin')
487 487 if branchmerge:
488 488 actions[standin] = ('k', None, 'replaced by non-standin')
489 489 else:
490 490 actions[standin] = ('r', None, 'replaced by non-standin')
491 491 elif lm in ('g', 'dc') and sm != 'r':
492 492 # Case 2: largefile in the working copy, normal file in
493 493 # the second parent
494 494 usermsg = _('remote turned local largefile %s into a normal file\n'
495 495 'keep (l)argefile or use (n)ormal file?'
496 496 '$$ &Largefile $$ &Normal file') % lfile
497 497 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
498 498 if branchmerge:
499 499 # largefile can be restored from standin safely
500 500 actions[lfile] = ('k', None, 'replaced by standin')
501 501 actions[standin] = ('k', None, 'replaces standin')
502 502 else:
503 503 # "lfile" should be marked as "removed" without
504 504 # removal of itself
505 505 actions[lfile] = ('lfmr', None,
506 506 'forget non-standin largefile')
507 507
508 508 # linear-merge should treat this largefile as 're-added'
509 509 actions[standin] = ('a', None, 'keep standin')
510 510 else: # pick remote normal file
511 511 actions[lfile] = ('g', largs, 'replaces standin')
512 512 actions[standin] = ('r', None, 'replaced by non-standin')
513 513
514 514 return actions, diverge, renamedelete
515 515
516 516 def mergerecordupdates(orig, repo, actions, branchmerge):
517 517 if 'lfmr' in actions:
518 518 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
519 519 for lfile, args, msg in actions['lfmr']:
520 520 # this should be executed before 'orig', to execute 'remove'
521 521 # before all other actions
522 522 repo.dirstate.remove(lfile)
523 523 # make sure lfile doesn't get synclfdirstate'd as normal
524 524 lfdirstate.add(lfile)
525 525 lfdirstate.write()
526 526
527 527 return orig(repo, actions, branchmerge)
528 528
529 529
530 530 # Override filemerge to prompt the user about how they wish to merge
531 531 # largefiles. This will handle identical edits without prompting the user.
532 532 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
533 533 if not lfutil.isstandin(orig):
534 534 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
535 535
536 536 ahash = fca.data().strip().lower()
537 537 dhash = fcd.data().strip().lower()
538 538 ohash = fco.data().strip().lower()
539 539 if (ohash != ahash and
540 540 ohash != dhash and
541 541 (dhash == ahash or
542 542 repo.ui.promptchoice(
543 543 _('largefile %s has a merge conflict\nancestor was %s\n'
544 544 'keep (l)ocal %s or\ntake (o)ther %s?'
545 545 '$$ &Local $$ &Other') %
546 546 (lfutil.splitstandin(orig), ahash, dhash, ohash),
547 547 0) == 1)):
548 548 repo.wwrite(fcd.path(), fco.data(), fco.flags())
549 549 return 0
550 550
551 551 def copiespathcopies(orig, ctx1, ctx2):
552 552 copies = orig(ctx1, ctx2)
553 553 updated = {}
554 554
555 555 for k, v in copies.iteritems():
556 556 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
557 557
558 558 return updated
559 559
560 560 # Copy first changes the matchers to match standins instead of
561 561 # largefiles. Then it overrides util.copyfile in that function it
562 562 # checks if the destination largefile already exists. It also keeps a
563 563 # list of copied files so that the largefiles can be copied and the
564 564 # dirstate updated.
565 565 def overridecopy(orig, ui, repo, pats, opts, rename=False):
566 566 # doesn't remove largefile on rename
567 567 if len(pats) < 2:
568 568 # this isn't legal, let the original function deal with it
569 569 return orig(ui, repo, pats, opts, rename)
570 570
571 571 # This could copy both lfiles and normal files in one command,
572 572 # but we don't want to do that. First replace their matcher to
573 573 # only match normal files and run it, then replace it to just
574 574 # match largefiles and run it again.
575 575 nonormalfiles = False
576 576 nolfiles = False
577 577 installnormalfilesmatchfn(repo[None].manifest())
578 578 try:
579 579 try:
580 580 result = orig(ui, repo, pats, opts, rename)
581 581 except util.Abort, e:
582 582 if str(e) != _('no files to copy'):
583 583 raise e
584 584 else:
585 585 nonormalfiles = True
586 586 result = 0
587 587 finally:
588 588 restorematchfn()
589 589
590 590 # The first rename can cause our current working directory to be removed.
591 591 # In that case there is nothing left to copy/rename so just quit.
592 592 try:
593 593 repo.getcwd()
594 594 except OSError:
595 595 return result
596 596
597 597 def makestandin(relpath):
598 598 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
599 599 return os.path.join(repo.wjoin(lfutil.standin(path)))
600 600
601 601 fullpats = scmutil.expandpats(pats)
602 602 dest = fullpats[-1]
603 603
604 604 if os.path.isdir(dest):
605 605 if not os.path.isdir(makestandin(dest)):
606 606 os.makedirs(makestandin(dest))
607 607
608 608 try:
609 609 try:
610 610 # When we call orig below it creates the standins but we don't add
611 611 # them to the dir state until later so lock during that time.
612 612 wlock = repo.wlock()
613 613
614 614 manifest = repo[None].manifest()
615 615 def overridematch(ctx, pats=[], opts={}, globbed=False,
616 616 default='relpath'):
617 617 newpats = []
618 618 # The patterns were previously mangled to add the standin
619 619 # directory; we need to remove that now
620 620 for pat in pats:
621 621 if match_.patkind(pat) is None and lfutil.shortname in pat:
622 622 newpats.append(pat.replace(lfutil.shortname, ''))
623 623 else:
624 624 newpats.append(pat)
625 625 match = oldmatch(ctx, newpats, opts, globbed, default)
626 626 m = copy.copy(match)
627 627 lfile = lambda f: lfutil.standin(f) in manifest
628 628 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
629 629 m._fmap = set(m._files)
630 630 origmatchfn = m.matchfn
631 631 m.matchfn = lambda f: (lfutil.isstandin(f) and
632 632 (f in manifest) and
633 633 origmatchfn(lfutil.splitstandin(f)) or
634 634 None)
635 635 return m
636 636 oldmatch = installmatchfn(overridematch)
637 637 listpats = []
638 638 for pat in pats:
639 639 if match_.patkind(pat) is not None:
640 640 listpats.append(pat)
641 641 else:
642 642 listpats.append(makestandin(pat))
643 643
644 644 try:
645 645 origcopyfile = util.copyfile
646 646 copiedfiles = []
647 647 def overridecopyfile(src, dest):
648 648 if (lfutil.shortname in src and
649 649 dest.startswith(repo.wjoin(lfutil.shortname))):
650 650 destlfile = dest.replace(lfutil.shortname, '')
651 651 if not opts['force'] and os.path.exists(destlfile):
652 652 raise IOError('',
653 653 _('destination largefile already exists'))
654 654 copiedfiles.append((src, dest))
655 655 origcopyfile(src, dest)
656 656
657 657 util.copyfile = overridecopyfile
658 658 result += orig(ui, repo, listpats, opts, rename)
659 659 finally:
660 660 util.copyfile = origcopyfile
661 661
662 662 lfdirstate = lfutil.openlfdirstate(ui, repo)
663 663 for (src, dest) in copiedfiles:
664 664 if (lfutil.shortname in src and
665 665 dest.startswith(repo.wjoin(lfutil.shortname))):
666 666 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
667 667 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
668 668 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
669 669 if not os.path.isdir(destlfiledir):
670 670 os.makedirs(destlfiledir)
671 671 if rename:
672 672 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
673 673
674 674 # The file is gone, but this deletes any empty parent
675 675 # directories as a side-effect.
676 676 util.unlinkpath(repo.wjoin(srclfile), True)
677 677 lfdirstate.remove(srclfile)
678 678 else:
679 679 util.copyfile(repo.wjoin(srclfile),
680 680 repo.wjoin(destlfile))
681 681
682 682 lfdirstate.add(destlfile)
683 683 lfdirstate.write()
684 684 except util.Abort, e:
685 685 if str(e) != _('no files to copy'):
686 686 raise e
687 687 else:
688 688 nolfiles = True
689 689 finally:
690 690 restorematchfn()
691 691 wlock.release()
692 692
693 693 if nolfiles and nonormalfiles:
694 694 raise util.Abort(_('no files to copy'))
695 695
696 696 return result
697 697
698 698 # When the user calls revert, we have to be careful to not revert any
699 699 # changes to other largefiles accidentally. This means we have to keep
700 700 # track of the largefiles that are being reverted so we only pull down
701 701 # the necessary largefiles.
702 702 #
703 703 # Standins are only updated (to match the hash of largefiles) before
704 704 # commits. Update the standins then run the original revert, changing
705 705 # the matcher to hit standins instead of largefiles. Based on the
706 706 # resulting standins update the largefiles.
707 707 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
708 708 # Because we put the standins in a bad state (by updating them)
709 709 # and then return them to a correct state we need to lock to
710 710 # prevent others from changing them in their incorrect state.
711 711 wlock = repo.wlock()
712 712 try:
713 713 lfdirstate = lfutil.openlfdirstate(ui, repo)
714 714 s = lfutil.lfdirstatestatus(lfdirstate, repo)
715 715 lfdirstate.write()
716 716 for lfile in s.modified:
717 717 lfutil.updatestandin(repo, lfutil.standin(lfile))
718 718 for lfile in s.deleted:
719 719 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
720 720 os.unlink(repo.wjoin(lfutil.standin(lfile)))
721 721
722 722 oldstandins = lfutil.getstandinsstate(repo)
723 723
724 724 def overridematch(mctx, pats=[], opts={}, globbed=False,
725 725 default='relpath'):
726 726 match = oldmatch(mctx, pats, opts, globbed, default)
727 727 m = copy.copy(match)
728 728
729 729 # revert supports recursing into subrepos, and though largefiles
730 730 # currently doesn't work correctly in that case, this match is
731 731 # called, so the lfdirstate above may not be the correct one for
732 732 # this invocation of match.
733 733 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
734 734 False)
735 735
736 736 def tostandin(f):
737 737 standin = lfutil.standin(f)
738 738 if standin in ctx or standin in mctx:
739 739 return standin
740 740 elif standin in repo[None] or lfdirstate[f] == 'r':
741 741 return None
742 742 return f
743 743 m._files = [tostandin(f) for f in m._files]
744 744 m._files = [f for f in m._files if f is not None]
745 745 m._fmap = set(m._files)
746 746 origmatchfn = m.matchfn
747 747 def matchfn(f):
748 748 if lfutil.isstandin(f):
749 749 return (origmatchfn(lfutil.splitstandin(f)) and
750 750 (f in ctx or f in mctx))
751 751 return origmatchfn(f)
752 752 m.matchfn = matchfn
753 753 return m
754 754 oldmatch = installmatchfn(overridematch)
755 755 try:
756 756 orig(ui, repo, ctx, parents, *pats, **opts)
757 757 finally:
758 758 restorematchfn()
759 759
760 760 newstandins = lfutil.getstandinsstate(repo)
761 761 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
762 762 # lfdirstate should be 'normallookup'-ed for updated files,
763 763 # because reverting doesn't touch dirstate for 'normal' files
764 764 # when target revision is explicitly specified: in such case,
765 765 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
766 766 # of target (standin) file.
767 767 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
768 768 normallookup=True)
769 769
770 770 finally:
771 771 wlock.release()
772 772
773 773 # after pulling changesets, we need to take some extra care to get
774 774 # largefiles updated remotely
775 775 def overridepull(orig, ui, repo, source=None, **opts):
776 776 revsprepull = len(repo)
777 777 if not source:
778 778 source = 'default'
779 779 repo.lfpullsource = source
780 780 result = orig(ui, repo, source, **opts)
781 781 revspostpull = len(repo)
782 782 lfrevs = opts.get('lfrev', [])
783 783 if opts.get('all_largefiles'):
784 784 lfrevs.append('pulled()')
785 785 if lfrevs and revspostpull > revsprepull:
786 786 numcached = 0
787 787 repo.firstpulled = revsprepull # for pulled() revset expression
788 788 try:
789 789 for rev in scmutil.revrange(repo, lfrevs):
790 790 ui.note(_('pulling largefiles for revision %s\n') % rev)
791 791 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
792 792 numcached += len(cached)
793 793 finally:
794 794 del repo.firstpulled
795 795 ui.status(_("%d largefiles cached\n") % numcached)
796 796 return result
797 797
798 798 def pulledrevsetsymbol(repo, subset, x):
799 799 """``pulled()``
800 800 Changesets that have just been pulled.
801 801
802 802 Only available with largefiles from pull --lfrev expressions.
803 803
804 804 .. container:: verbose
805 805
806 806 Some examples:
807 807
808 808 - pull largefiles for all new changesets::
809 809
810 810 hg pull -lfrev "pulled()"
811 811
812 812 - pull largefiles for all new branch heads::
813 813
814 814 hg pull -lfrev "head(pulled()) and not closed()"
815 815
816 816 """
817 817
818 818 try:
819 819 firstpulled = repo.firstpulled
820 820 except AttributeError:
821 821 raise util.Abort(_("pulled() only available in --lfrev"))
822 822 return revset.baseset([r for r in subset if r >= firstpulled])
823 823
824 824 def overrideclone(orig, ui, source, dest=None, **opts):
825 825 d = dest
826 826 if d is None:
827 827 d = hg.defaultdest(source)
828 828 if opts.get('all_largefiles') and not hg.islocal(d):
829 829 raise util.Abort(_(
830 830 '--all-largefiles is incompatible with non-local destination %s') %
831 831 d)
832 832
833 833 return orig(ui, source, dest, **opts)
834 834
835 835 def hgclone(orig, ui, opts, *args, **kwargs):
836 836 result = orig(ui, opts, *args, **kwargs)
837 837
838 838 if result is not None:
839 839 sourcerepo, destrepo = result
840 840 repo = destrepo.local()
841 841
842 842 # If largefiles is required for this repo, permanently enable it locally
843 843 if 'largefiles' in repo.requirements:
844 844 fp = repo.vfs('hgrc', 'a', text=True)
845 845 try:
846 846 fp.write('\n[extensions]\nlargefiles=\n')
847 847 finally:
848 848 fp.close()
849 849
850 850 # Caching is implicitly limited to 'rev' option, since the dest repo was
851 851 # truncated at that point. The user may expect a download count with
852 852 # this option, so attempt whether or not this is a largefile repo.
853 853 if opts.get('all_largefiles'):
854 854 success, missing = lfcommands.downloadlfiles(ui, repo, None)
855 855
856 856 if missing != 0:
857 857 return None
858 858
859 859 return result
860 860
861 861 def overriderebase(orig, ui, repo, **opts):
862 862 if not util.safehasattr(repo, '_largefilesenabled'):
863 863 return orig(ui, repo, **opts)
864 864
865 865 resuming = opts.get('continue')
866 866 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
867 867 repo._lfstatuswriters.append(lambda *msg, **opts: None)
868 868 try:
869 869 return orig(ui, repo, **opts)
870 870 finally:
871 871 repo._lfstatuswriters.pop()
872 872 repo._lfcommithooks.pop()
873 873
874 874 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
875 875 prefix='', mtime=None, subrepos=None):
876 876 # No need to lock because we are only reading history and
877 877 # largefile caches, neither of which are modified.
878 878 lfcommands.cachelfiles(repo.ui, repo, node)
879 879
880 880 if kind not in archival.archivers:
881 881 raise util.Abort(_("unknown archive type '%s'") % kind)
882 882
883 883 ctx = repo[node]
884 884
885 885 if kind == 'files':
886 886 if prefix:
887 887 raise util.Abort(
888 888 _('cannot give prefix when archiving to files'))
889 889 else:
890 890 prefix = archival.tidyprefix(dest, kind, prefix)
891 891
892 892 def write(name, mode, islink, getdata):
893 893 if matchfn and not matchfn(name):
894 894 return
895 895 data = getdata()
896 896 if decode:
897 897 data = repo.wwritedata(name, data)
898 898 archiver.addfile(prefix + name, mode, islink, data)
899 899
900 900 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
901 901
902 902 if repo.ui.configbool("ui", "archivemeta", True):
903 903 def metadata():
904 904 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
905 905 hex(repo.changelog.node(0)), hex(node), ctx.branch())
906 906
907 907 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
908 908 if repo.tagtype(t) == 'global')
909 909 if not tags:
910 910 repo.ui.pushbuffer()
911 911 opts = {'template': '{latesttag}\n{latesttagdistance}',
912 912 'style': '', 'patch': None, 'git': None}
913 913 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
914 914 ltags, dist = repo.ui.popbuffer().split('\n')
915 915 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
916 916 tags += 'latesttagdistance: %s\n' % dist
917 917
918 918 return base + tags
919 919
920 920 write('.hg_archival.txt', 0644, False, metadata)
921 921
922 922 for f in ctx:
923 923 ff = ctx.flags(f)
924 924 getdata = ctx[f].data
925 925 if lfutil.isstandin(f):
926 926 path = lfutil.findfile(repo, getdata().strip())
927 927 if path is None:
928 928 raise util.Abort(
929 929 _('largefile %s not found in repo store or system cache')
930 930 % lfutil.splitstandin(f))
931 931 f = lfutil.splitstandin(f)
932 932
933 933 def getdatafn():
934 934 fd = None
935 935 try:
936 936 fd = open(path, 'rb')
937 937 return fd.read()
938 938 finally:
939 939 if fd:
940 940 fd.close()
941 941
942 942 getdata = getdatafn
943 943 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
944 944
945 945 if subrepos:
946 946 for subpath in sorted(ctx.substate):
947 947 sub = ctx.sub(subpath)
948 948 submatch = match_.narrowmatcher(subpath, matchfn)
949 949 sub.archive(archiver, prefix, submatch)
950 950
951 951 archiver.done()
952 952
953 953 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
954 954 repo._get(repo._state + ('hg',))
955 955 rev = repo._state[1]
956 956 ctx = repo._repo[rev]
957 957
958 958 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
959 959
960 960 def write(name, mode, islink, getdata):
961 961 # At this point, the standin has been replaced with the largefile name,
962 962 # so the normal matcher works here without the lfutil variants.
963 963 if match and not match(f):
964 964 return
965 965 data = getdata()
966 966
967 967 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
968 968
969 969 for f in ctx:
970 970 ff = ctx.flags(f)
971 971 getdata = ctx[f].data
972 972 if lfutil.isstandin(f):
973 973 path = lfutil.findfile(repo._repo, getdata().strip())
974 974 if path is None:
975 975 raise util.Abort(
976 976 _('largefile %s not found in repo store or system cache')
977 977 % lfutil.splitstandin(f))
978 978 f = lfutil.splitstandin(f)
979 979
980 980 def getdatafn():
981 981 fd = None
982 982 try:
983 983 fd = open(os.path.join(prefix, path), 'rb')
984 984 return fd.read()
985 985 finally:
986 986 if fd:
987 987 fd.close()
988 988
989 989 getdata = getdatafn
990 990
991 991 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
992 992
993 993 for subpath in sorted(ctx.substate):
994 994 sub = ctx.sub(subpath)
995 995 submatch = match_.narrowmatcher(subpath, match)
996 996 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
997 997
998 998 # If a largefile is modified, the change is not reflected in its
999 999 # standin until a commit. cmdutil.bailifchanged() raises an exception
1000 1000 # if the repo has uncommitted changes. Wrap it to also check if
1001 1001 # largefiles were changed. This is used by bisect, backout and fetch.
1002 1002 def overridebailifchanged(orig, repo, *args, **kwargs):
1003 1003 orig(repo, *args, **kwargs)
1004 1004 repo.lfstatus = True
1005 1005 s = repo.status()
1006 1006 repo.lfstatus = False
1007 1007 if s.modified or s.added or s.removed or s.deleted:
1008 1008 raise util.Abort(_('uncommitted changes'))
1009 1009
1010 1010 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1011 1011 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1012 1012 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1013 1013 m = composelargefilematcher(match, repo[None].manifest())
1014 1014
1015 1015 try:
1016 1016 repo.lfstatus = True
1017 1017 s = repo.status(match=m, clean=True)
1018 1018 finally:
1019 1019 repo.lfstatus = False
1020 1020 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1021 1021 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1022 1022
1023 1023 for f in forget:
1024 1024 if lfutil.standin(f) not in repo.dirstate and not \
1025 1025 repo.wvfs.isdir(lfutil.standin(f)):
1026 1026 ui.warn(_('not removing %s: file is already untracked\n')
1027 1027 % m.rel(f))
1028 1028 bad.append(f)
1029 1029
1030 1030 for f in forget:
1031 1031 if ui.verbose or not m.exact(f):
1032 1032 ui.status(_('removing %s\n') % m.rel(f))
1033 1033
1034 1034 # Need to lock because standin files are deleted then removed from the
1035 1035 # repository and we could race in-between.
1036 1036 wlock = repo.wlock()
1037 1037 try:
1038 1038 lfdirstate = lfutil.openlfdirstate(ui, repo)
1039 1039 for f in forget:
1040 1040 if lfdirstate[f] == 'a':
1041 1041 lfdirstate.drop(f)
1042 1042 else:
1043 1043 lfdirstate.remove(f)
1044 1044 lfdirstate.write()
1045 1045 standins = [lfutil.standin(f) for f in forget]
1046 1046 for f in standins:
1047 1047 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1048 1048 rejected = repo[None].forget(standins)
1049 1049 finally:
1050 1050 wlock.release()
1051 1051
1052 1052 bad.extend(f for f in rejected if f in m.files())
1053 1053 forgot.extend(f for f in forget if f not in rejected)
1054 1054 return bad, forgot
1055 1055
1056 1056 def _getoutgoings(repo, other, missing, addfunc):
1057 1057 """get pairs of filename and largefile hash in outgoing revisions
1058 1058 in 'missing'.
1059 1059
1060 1060 largefiles already existing on 'other' repository are ignored.
1061 1061
1062 1062 'addfunc' is invoked with each unique pairs of filename and
1063 1063 largefile hash value.
1064 1064 """
1065 1065 knowns = set()
1066 1066 lfhashes = set()
1067 1067 def dedup(fn, lfhash):
1068 1068 k = (fn, lfhash)
1069 1069 if k not in knowns:
1070 1070 knowns.add(k)
1071 1071 lfhashes.add(lfhash)
1072 1072 lfutil.getlfilestoupload(repo, missing, dedup)
1073 1073 if lfhashes:
1074 1074 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1075 1075 for fn, lfhash in knowns:
1076 1076 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1077 1077 addfunc(fn, lfhash)
1078 1078
1079 1079 def outgoinghook(ui, repo, other, opts, missing):
1080 1080 if opts.pop('large', None):
1081 1081 lfhashes = set()
1082 1082 if ui.debugflag:
1083 1083 toupload = {}
1084 1084 def addfunc(fn, lfhash):
1085 1085 if fn not in toupload:
1086 1086 toupload[fn] = []
1087 1087 toupload[fn].append(lfhash)
1088 1088 lfhashes.add(lfhash)
1089 1089 def showhashes(fn):
1090 1090 for lfhash in sorted(toupload[fn]):
1091 1091 ui.debug(' %s\n' % (lfhash))
1092 1092 else:
1093 1093 toupload = set()
1094 1094 def addfunc(fn, lfhash):
1095 1095 toupload.add(fn)
1096 1096 lfhashes.add(lfhash)
1097 1097 def showhashes(fn):
1098 1098 pass
1099 1099 _getoutgoings(repo, other, missing, addfunc)
1100 1100
1101 1101 if not toupload:
1102 1102 ui.status(_('largefiles: no files to upload\n'))
1103 1103 else:
1104 1104 ui.status(_('largefiles to upload (%d entities):\n')
1105 1105 % (len(lfhashes)))
1106 1106 for file in sorted(toupload):
1107 1107 ui.status(lfutil.splitstandin(file) + '\n')
1108 1108 showhashes(file)
1109 1109 ui.status('\n')
1110 1110
1111 1111 def summaryremotehook(ui, repo, opts, changes):
1112 1112 largeopt = opts.get('large', False)
1113 1113 if changes is None:
1114 1114 if largeopt:
1115 1115 return (False, True) # only outgoing check is needed
1116 1116 else:
1117 1117 return (False, False)
1118 1118 elif largeopt:
1119 1119 url, branch, peer, outgoing = changes[1]
1120 1120 if peer is None:
1121 1121 # i18n: column positioning for "hg summary"
1122 1122 ui.status(_('largefiles: (no remote repo)\n'))
1123 1123 return
1124 1124
1125 1125 toupload = set()
1126 1126 lfhashes = set()
1127 1127 def addfunc(fn, lfhash):
1128 1128 toupload.add(fn)
1129 1129 lfhashes.add(lfhash)
1130 1130 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1131 1131
1132 1132 if not toupload:
1133 1133 # i18n: column positioning for "hg summary"
1134 1134 ui.status(_('largefiles: (no files to upload)\n'))
1135 1135 else:
1136 1136 # i18n: column positioning for "hg summary"
1137 1137 ui.status(_('largefiles: %d entities for %d files to upload\n')
1138 1138 % (len(lfhashes), len(toupload)))
1139 1139
1140 1140 def overridesummary(orig, ui, repo, *pats, **opts):
1141 1141 try:
1142 1142 repo.lfstatus = True
1143 1143 orig(ui, repo, *pats, **opts)
1144 1144 finally:
1145 1145 repo.lfstatus = False
1146 1146
1147 1147 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1148 1148 similarity=None):
1149 1149 if not lfutil.islfilesrepo(repo):
1150 1150 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1151 1151 # Get the list of missing largefiles so we can remove them
1152 1152 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1153 1153 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1154 1154 False, False, False)
1155 1155
1156 1156 # Call into the normal remove code, but the removing of the standin, we want
1157 1157 # to have handled by original addremove. Monkey patching here makes sure
1158 1158 # we don't remove the standin in the largefiles code, preventing a very
1159 1159 # confused state later.
1160 1160 if s.deleted:
1161 1161 m = copy.copy(matcher)
1162 1162
1163 1163 # The m._files and m._map attributes are not changed to the deleted list
1164 1164 # because that affects the m.exact() test, which in turn governs whether
1165 1165 # or not the file name is printed, and how. Simply limit the original
1166 1166 # matches to those in the deleted status list.
1167 1167 matchfn = m.matchfn
1168 1168 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1169 1169
1170 1170 removelargefiles(repo.ui, repo, True, m, **opts)
1171 1171 # Call into the normal add code, and any files that *should* be added as
1172 1172 # largefiles will be
1173 1173 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1174 1174 # Now that we've handled largefiles, hand off to the original addremove
1175 1175 # function to take care of the rest. Make sure it doesn't do anything with
1176 1176 # largefiles by passing a matcher that will ignore them.
1177 1177 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1178 1178 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1179 1179
1180 1180 # Calling purge with --all will cause the largefiles to be deleted.
1181 1181 # Override repo.status to prevent this from happening.
1182 1182 def overridepurge(orig, ui, repo, *dirs, **opts):
1183 1183 # XXX Monkey patching a repoview will not work. The assigned attribute will
1184 1184 # be set on the unfiltered repo, but we will only lookup attributes in the
1185 1185 # unfiltered repo if the lookup in the repoview object itself fails. As the
1186 1186 # monkey patched method exists on the repoview class the lookup will not
1187 1187 # fail. As a result, the original version will shadow the monkey patched
1188 1188 # one, defeating the monkey patch.
1189 1189 #
1190 1190 # As a work around we use an unfiltered repo here. We should do something
1191 1191 # cleaner instead.
1192 1192 repo = repo.unfiltered()
1193 1193 oldstatus = repo.status
1194 1194 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1195 1195 clean=False, unknown=False, listsubrepos=False):
1196 1196 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1197 1197 listsubrepos)
1198 1198 lfdirstate = lfutil.openlfdirstate(ui, repo)
1199 1199 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1200 1200 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1201 1201 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1202 1202 unknown, ignored, r.clean)
1203 1203 repo.status = overridestatus
1204 1204 orig(ui, repo, *dirs, **opts)
1205 1205 repo.status = oldstatus
1206 1206 def overriderollback(orig, ui, repo, **opts):
1207 1207 wlock = repo.wlock()
1208 1208 try:
1209 1209 before = repo.dirstate.parents()
1210 1210 orphans = set(f for f in repo.dirstate
1211 1211 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1212 1212 result = orig(ui, repo, **opts)
1213 1213 after = repo.dirstate.parents()
1214 1214 if before == after:
1215 1215 return result # no need to restore standins
1216 1216
1217 1217 pctx = repo['.']
1218 1218 for f in repo.dirstate:
1219 1219 if lfutil.isstandin(f):
1220 1220 orphans.discard(f)
1221 1221 if repo.dirstate[f] == 'r':
1222 1222 repo.wvfs.unlinkpath(f, ignoremissing=True)
1223 1223 elif f in pctx:
1224 1224 fctx = pctx[f]
1225 1225 repo.wwrite(f, fctx.data(), fctx.flags())
1226 1226 else:
1227 1227 # content of standin is not so important in 'a',
1228 1228 # 'm' or 'n' (coming from the 2nd parent) cases
1229 1229 lfutil.writestandin(repo, f, '', False)
1230 1230 for standin in orphans:
1231 1231 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1232 1232
1233 1233 lfdirstate = lfutil.openlfdirstate(ui, repo)
1234 1234 orphans = set(lfdirstate)
1235 1235 lfiles = lfutil.listlfiles(repo)
1236 1236 for file in lfiles:
1237 1237 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1238 1238 orphans.discard(file)
1239 1239 for lfile in orphans:
1240 1240 lfdirstate.drop(lfile)
1241 1241 lfdirstate.write()
1242 1242 finally:
1243 1243 wlock.release()
1244 1244 return result
1245 1245
1246 1246 def overridetransplant(orig, ui, repo, *revs, **opts):
1247 1247 resuming = opts.get('continue')
1248 1248 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1249 1249 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1250 1250 try:
1251 1251 result = orig(ui, repo, *revs, **opts)
1252 1252 finally:
1253 1253 repo._lfstatuswriters.pop()
1254 1254 repo._lfcommithooks.pop()
1255 1255 return result
1256 1256
1257 1257 def overridecat(orig, ui, repo, file1, *pats, **opts):
1258 1258 ctx = scmutil.revsingle(repo, opts.get('rev'))
1259 1259 err = 1
1260 1260 notbad = set()
1261 1261 m = scmutil.match(ctx, (file1,) + pats, opts)
1262 1262 origmatchfn = m.matchfn
1263 1263 def lfmatchfn(f):
1264 1264 if origmatchfn(f):
1265 1265 return True
1266 1266 lf = lfutil.splitstandin(f)
1267 1267 if lf is None:
1268 1268 return False
1269 1269 notbad.add(lf)
1270 1270 return origmatchfn(lf)
1271 1271 m.matchfn = lfmatchfn
1272 1272 origbadfn = m.bad
1273 1273 def lfbadfn(f, msg):
1274 1274 if not f in notbad:
1275 1275 origbadfn(f, msg)
1276 1276 m.bad = lfbadfn
1277 1277 for f in ctx.walk(m):
1278 1278 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1279 1279 pathname=f)
1280 1280 lf = lfutil.splitstandin(f)
1281 1281 if lf is None or origmatchfn(f):
1282 1282 # duplicating unreachable code from commands.cat
1283 1283 data = ctx[f].data()
1284 1284 if opts.get('decode'):
1285 1285 data = repo.wwritedata(f, data)
1286 1286 fp.write(data)
1287 1287 else:
1288 1288 hash = lfutil.readstandin(repo, lf, ctx.rev())
1289 1289 if not lfutil.inusercache(repo.ui, hash):
1290 1290 store = basestore._openstore(repo)
1291 1291 success, missing = store.get([(lf, hash)])
1292 1292 if len(success) != 1:
1293 1293 raise util.Abort(
1294 1294 _('largefile %s is not in cache and could not be '
1295 1295 'downloaded') % lf)
1296 1296 path = lfutil.usercachepath(repo.ui, hash)
1297 1297 fpin = open(path, "rb")
1298 1298 for chunk in util.filechunkiter(fpin, 128 * 1024):
1299 1299 fp.write(chunk)
1300 1300 fpin.close()
1301 1301 fp.close()
1302 1302 err = 0
1303 1303 return err
1304 1304
1305 1305 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1306 1306 *args, **kwargs):
1307 1307 wlock = repo.wlock()
1308 1308 try:
1309 1309 # branch | | |
1310 1310 # merge | force | partial | action
1311 1311 # -------+-------+---------+--------------
1312 1312 # x | x | x | linear-merge
1313 1313 # o | x | x | branch-merge
1314 1314 # x | o | x | overwrite (as clean update)
1315 1315 # o | o | x | force-branch-merge (*1)
1316 1316 # x | x | o | (*)
1317 1317 # o | x | o | (*)
1318 1318 # x | o | o | overwrite (as revert)
1319 1319 # o | o | o | (*)
1320 1320 #
1321 1321 # (*) don't care
1322 1322 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1323 1323
1324 1324 linearmerge = not branchmerge and not force and not partial
1325 1325
1326 1326 if linearmerge or (branchmerge and force and not partial):
1327 1327 # update standins for linear-merge or force-branch-merge,
1328 1328 # because largefiles in the working directory may be modified
1329 1329 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1330 1330 unsure, s = lfdirstate.status(match_.always(repo.root,
1331 1331 repo.getcwd()),
1332 1332 [], False, False, False)
1333 1333 pctx = repo['.']
1334 1334 for lfile in unsure + s.modified:
1335 1335 lfileabs = repo.wvfs.join(lfile)
1336 1336 if not os.path.exists(lfileabs):
1337 1337 continue
1338 1338 lfhash = lfutil.hashrepofile(repo, lfile)
1339 1339 standin = lfutil.standin(lfile)
1340 1340 lfutil.writestandin(repo, standin, lfhash,
1341 1341 lfutil.getexecutable(lfileabs))
1342 1342 if (standin in pctx and
1343 1343 lfhash == lfutil.readstandin(repo, lfile, '.')):
1344 1344 lfdirstate.normal(lfile)
1345 1345 for lfile in s.added:
1346 1346 lfutil.updatestandin(repo, lfutil.standin(lfile))
1347 1347 lfdirstate.write()
1348 1348
1349 1349 if linearmerge:
1350 1350 # Only call updatelfiles on the standins that have changed
1351 1351 # to save time
1352 1352 oldstandins = lfutil.getstandinsstate(repo)
1353 1353
1354 1354 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1355 1355
1356 1356 filelist = None
1357 1357 if linearmerge:
1358 1358 newstandins = lfutil.getstandinsstate(repo)
1359 1359 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1360 1360
1361 1361 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1362 1362 normallookup=partial, checked=linearmerge)
1363 1363
1364 1364 return result
1365 1365 finally:
1366 1366 wlock.release()
1367 1367
1368 1368 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1369 1369 result = orig(repo, files, *args, **kwargs)
1370 1370
1371 1371 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1372 1372 if filelist:
1373 1373 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1374 1374 printmessage=False, normallookup=True)
1375 1375
1376 1376 return result
@@ -1,3242 +1,3242 @@
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import encoding
17 17 import crecord as crecordmod
18 18 import lock as lockmod
19 19
20 20 def parsealiases(cmd):
21 21 return cmd.lstrip("^").split("|")
22 22
23 23 def setupwrapcolorwrite(ui):
24 24 # wrap ui.write so diff output can be labeled/colorized
25 25 def wrapwrite(orig, *args, **kw):
26 26 label = kw.pop('label', '')
27 27 for chunk, l in patch.difflabel(lambda: args):
28 28 orig(chunk, label=label + l)
29 29
30 30 oldwrite = ui.write
31 31 def wrap(*args, **kwargs):
32 32 return wrapwrite(oldwrite, *args, **kwargs)
33 33 setattr(ui, 'write', wrap)
34 34 return oldwrite
35 35
36 36 def filterchunks(ui, originalhunks, usecurses, testfile):
37 37 if usecurses:
38 38 if testfile:
39 39 recordfn = crecordmod.testdecorator(testfile,
40 40 crecordmod.testchunkselector)
41 41 else:
42 42 recordfn = crecordmod.chunkselector
43 43
44 44 return crecordmod.filterpatch(ui, originalhunks, recordfn)
45 45
46 46 else:
47 47 return patch.filterpatch(ui, originalhunks)
48 48
49 49 def recordfilter(ui, originalhunks):
50 50 usecurses = ui.configbool('experimental', 'crecord', False)
51 51 testfile = ui.config('experimental', 'crecordtest', None)
52 52 oldwrite = setupwrapcolorwrite(ui)
53 53 try:
54 54 newchunks = filterchunks(ui, originalhunks, usecurses, testfile)
55 55 finally:
56 56 ui.write = oldwrite
57 57 return newchunks
58 58
59 59 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
60 60 filterfn, *pats, **opts):
61 61 import merge as mergemod
62 62 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
63 63 ishunk = lambda x: isinstance(x, hunkclasses)
64 64
65 65 if not ui.interactive():
66 66 raise util.Abort(_('running non-interactively, use %s instead') %
67 67 cmdsuggest)
68 68
69 69 # make sure username is set before going interactive
70 70 if not opts.get('user'):
71 71 ui.username() # raise exception, username not provided
72 72
73 73 def recordfunc(ui, repo, message, match, opts):
74 74 """This is generic record driver.
75 75
76 76 Its job is to interactively filter local changes, and
77 77 accordingly prepare working directory into a state in which the
78 78 job can be delegated to a non-interactive commit command such as
79 79 'commit' or 'qrefresh'.
80 80
81 81 After the actual job is done by non-interactive command, the
82 82 working directory is restored to its original state.
83 83
84 84 In the end we'll record interesting changes, and everything else
85 85 will be left in place, so the user can continue working.
86 86 """
87 87
88 88 checkunfinished(repo, commit=True)
89 89 merge = len(repo[None].parents()) > 1
90 90 if merge:
91 91 raise util.Abort(_('cannot partially commit a merge '
92 92 '(use "hg commit" instead)'))
93 93
94 94 status = repo.status(match=match)
95 95 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
96 96 diffopts.nodates = True
97 97 diffopts.git = True
98 98 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
99 99 originalchunks = patch.parsepatch(originaldiff)
100 100
101 101 # 1. filter the patch, so we have an intending-to-apply subset of it
102 102 try:
103 103 chunks = filterfn(ui, originalchunks)
104 104 except patch.PatchError, err:
105 105 raise util.Abort(_('error parsing patch: %s') % err)
106 106
107 107 contenders = set()
108 108 for h in chunks:
109 109 try:
110 110 contenders.update(set(h.files()))
111 111 except AttributeError:
112 112 pass
113 113
114 114 changed = status.modified + status.added + status.removed
115 115 newfiles = [f for f in changed if f in contenders]
116 116 if not newfiles:
117 117 ui.status(_('no changes to record\n'))
118 118 return 0
119 119
120 120 newandmodifiedfiles = set()
121 121 for h in chunks:
122 122 isnew = h.filename() in status.added
123 123 if ishunk(h) and isnew and h not in originalchunks:
124 124 newandmodifiedfiles.add(h.filename())
125 125
126 126 modified = set(status.modified)
127 127
128 128 # 2. backup changed files, so we can restore them in the end
129 129
130 130 if backupall:
131 131 tobackup = changed
132 132 else:
133 133 tobackup = [f for f in newfiles
134 134 if f in modified or f in newandmodifiedfiles]
135 135
136 136 backups = {}
137 137 if tobackup:
138 138 backupdir = repo.join('record-backups')
139 139 try:
140 140 os.mkdir(backupdir)
141 141 except OSError, err:
142 142 if err.errno != errno.EEXIST:
143 143 raise
144 144 try:
145 145 # backup continues
146 146 for f in tobackup:
147 147 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
148 148 dir=backupdir)
149 149 os.close(fd)
150 150 ui.debug('backup %r as %r\n' % (f, tmpname))
151 151 util.copyfile(repo.wjoin(f), tmpname)
152 152 shutil.copystat(repo.wjoin(f), tmpname)
153 153 backups[f] = tmpname
154 154
155 155 fp = cStringIO.StringIO()
156 156 for c in chunks:
157 157 fname = c.filename()
158 158 if fname in backups or fname in newandmodifiedfiles:
159 159 c.write(fp)
160 160 dopatch = fp.tell()
161 161 fp.seek(0)
162 162
163 163 [os.unlink(c) for c in newandmodifiedfiles]
164 164
165 165 # 3a. apply filtered patch to clean repo (clean)
166 166 if backups:
167 167 # Equivalent to hg.revert
168 168 choices = lambda key: key in backups
169 169 mergemod.update(repo, repo.dirstate.p1(),
170 170 False, True, choices)
171 171
172 172 # 3b. (apply)
173 173 if dopatch:
174 174 try:
175 175 ui.debug('applying patch\n')
176 176 ui.debug(fp.getvalue())
177 177 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
178 178 except patch.PatchError, err:
179 179 raise util.Abort(str(err))
180 180 del fp
181 181
182 182 # 4. We prepared the working directory according to the filtered
183 183 # patch. Now is the time to delegate the job to
184 184 # commit/qrefresh or the like!
185 185
186 186 # Make all of the pathnames absolute.
187 187 newfiles = [repo.wjoin(nf) for nf in newfiles]
188 188 return commitfunc(ui, repo, *newfiles, **opts)
189 189 finally:
190 190 # 5. finally restore backed-up files
191 191 try:
192 192 for realname, tmpname in backups.iteritems():
193 193 ui.debug('restoring %r to %r\n' % (tmpname, realname))
194 194 util.copyfile(tmpname, repo.wjoin(realname))
195 195 # Our calls to copystat() here and above are a
196 196 # hack to trick any editors that have f open into
197 197 # thinking we haven't modified them.
198 198 #
199 199 # Also note that this is racy, as an editor could
200 200 # notice the file's mtime before we've finished
201 201 # writing it.
202 202 shutil.copystat(tmpname, repo.wjoin(realname))
203 203 os.unlink(tmpname)
204 204 if tobackup:
205 205 os.rmdir(backupdir)
206 206 except OSError:
207 207 pass
208 208
209 209 return commit(ui, repo, recordfunc, pats, opts)
210 210
211 211 def findpossible(cmd, table, strict=False):
212 212 """
213 213 Return cmd -> (aliases, command table entry)
214 214 for each matching command.
215 215 Return debug commands (or their aliases) only if no normal command matches.
216 216 """
217 217 choice = {}
218 218 debugchoice = {}
219 219
220 220 if cmd in table:
221 221 # short-circuit exact matches, "log" alias beats "^log|history"
222 222 keys = [cmd]
223 223 else:
224 224 keys = table.keys()
225 225
226 226 allcmds = []
227 227 for e in keys:
228 228 aliases = parsealiases(e)
229 229 allcmds.extend(aliases)
230 230 found = None
231 231 if cmd in aliases:
232 232 found = cmd
233 233 elif not strict:
234 234 for a in aliases:
235 235 if a.startswith(cmd):
236 236 found = a
237 237 break
238 238 if found is not None:
239 239 if aliases[0].startswith("debug") or found.startswith("debug"):
240 240 debugchoice[found] = (aliases, table[e])
241 241 else:
242 242 choice[found] = (aliases, table[e])
243 243
244 244 if not choice and debugchoice:
245 245 choice = debugchoice
246 246
247 247 return choice, allcmds
248 248
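# Editor's note: a doctest-style sketch (not upstream code) of the prefix
# matching described in the docstring above; the table values here are just
# dummy strings standing in for real command-table entries, and the module
# is assumed to be importable as mercurial.cmdutil.
#
#     >>> from mercurial import cmdutil
#     >>> table = {'^log|history': 'logentry', 'status': 'statusentry'}
#     >>> choice, allcmds = cmdutil.findpossible('hist', table)
#     >>> choice
#     {'history': (['log', 'history'], 'logentry')}
#     >>> sorted(allcmds)
#     ['history', 'log', 'status']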
249 249 def findcmd(cmd, table, strict=True):
250 250 """Return (aliases, command table entry) for command string."""
251 251 choice, allcmds = findpossible(cmd, table, strict)
252 252
253 253 if cmd in choice:
254 254 return choice[cmd]
255 255
256 256 if len(choice) > 1:
257 257 clist = choice.keys()
258 258 clist.sort()
259 259 raise error.AmbiguousCommand(cmd, clist)
260 260
261 261 if choice:
262 262 return choice.values()[0]
263 263
264 264 raise error.UnknownCommand(cmd, allcmds)
265 265
266 266 def findrepo(p):
267 267 while not os.path.isdir(os.path.join(p, ".hg")):
268 268 oldp, p = p, os.path.dirname(p)
269 269 if p == oldp:
270 270 return None
271 271
272 272 return p
273 273
274 274 def bailifchanged(repo, merge=True):
275 275 if merge and repo.dirstate.p2() != nullid:
276 276 raise util.Abort(_('outstanding uncommitted merge'))
277 277 modified, added, removed, deleted = repo.status()[:4]
278 278 if modified or added or removed or deleted:
279 279 raise util.Abort(_('uncommitted changes'))
280 280 ctx = repo[None]
281 281 for s in sorted(ctx.substate):
282 282 ctx.sub(s).bailifchanged()
283 283
284 284 def logmessage(ui, opts):
285 285 """ get the log message according to -m and -l option """
286 286 message = opts.get('message')
287 287 logfile = opts.get('logfile')
288 288
289 289 if message and logfile:
290 290 raise util.Abort(_('options --message and --logfile are mutually '
291 291 'exclusive'))
292 292 if not message and logfile:
293 293 try:
294 294 if logfile == '-':
295 295 message = ui.fin.read()
296 296 else:
297 297 message = '\n'.join(util.readfile(logfile).splitlines())
298 298 except IOError, inst:
299 299 raise util.Abort(_("can't read commit message '%s': %s") %
300 300 (logfile, inst.strerror))
301 301 return message
302 302
303 303 def mergeeditform(ctxorbool, baseformname):
304 304 """return appropriate editform name (referencing a committemplate)
305 305
306 306 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
307 307 a merge is being committed.
308 308
309 309 This returns baseformname with '.merge' appended if it is a merge,
310 310 otherwise '.normal' is appended.
311 311 """
312 312 if isinstance(ctxorbool, bool):
313 313 if ctxorbool:
314 314 return baseformname + ".merge"
315 315 elif 1 < len(ctxorbool.parents()):
316 316 return baseformname + ".merge"
317 317
318 318 return baseformname + ".normal"
319 319
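# Editor's note: a small doctest-style sketch (not upstream code) of the
# editform names produced by mergeeditform() above when given a bool:
#
#     >>> from mercurial import cmdutil
#     >>> cmdutil.mergeeditform(True, 'commit')
#     'commit.merge'
#     >>> cmdutil.mergeeditform(False, 'commit')
#     'commit.normal'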
320 320 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
321 321 editform='', **opts):
322 322 """get appropriate commit message editor according to '--edit' option
323 323
324 324 'finishdesc' is a function to be called with the edited commit message
325 325 (= 'description' of the new changeset) just after editing, but
326 326 before checking for emptiness. It should return the actual text to be
327 327 stored into history. This allows changing the description before
328 328 storing.
329 329
330 330 'extramsg' is an extra message to be shown in the editor instead of
331 331 the 'Leave message empty to abort commit' line. The 'HG: ' prefix and
332 332 EOL are added automatically.
333 333
334 334 'editform' is a dot-separated list of names, to distinguish
335 335 the purpose of commit text editing.
336 336
337 337 'getcommiteditor' returns 'commitforceeditor' regardless of
338 338 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
339 339 they are specific to usage in MQ.
340 340 """
341 341 if edit or finishdesc or extramsg:
342 342 return lambda r, c, s: commitforceeditor(r, c, s,
343 343 finishdesc=finishdesc,
344 344 extramsg=extramsg,
345 345 editform=editform)
346 346 elif editform:
347 347 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
348 348 else:
349 349 return commiteditor
350 350
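# Editor's note: a doctest-style sketch (not upstream code) illustrating the
# docstring above: with none of 'edit'/'finishdesc'/'extramsg'/'editform'
# the plain 'commiteditor' is returned, while 'extramsg' forces the
# interactive editor path.
#
#     >>> from mercurial import cmdutil
#     >>> cmdutil.getcommiteditor() is cmdutil.commiteditor
#     True
#     >>> forced = cmdutil.getcommiteditor(extramsg='extra note')
#     >>> forced is cmdutil.commiteditor
#     False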
351 351 def loglimit(opts):
352 352 """get the log limit according to option -l/--limit"""
353 353 limit = opts.get('limit')
354 354 if limit:
355 355 try:
356 356 limit = int(limit)
357 357 except ValueError:
358 358 raise util.Abort(_('limit must be a positive integer'))
359 359 if limit <= 0:
360 360 raise util.Abort(_('limit must be positive'))
361 361 else:
362 362 limit = None
363 363 return limit
364 364
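# Editor's note: a doctest-style sketch (not upstream code) of loglimit()
# above, assuming the module is importable as mercurial.cmdutil:
#
#     >>> from mercurial import cmdutil
#     >>> cmdutil.loglimit({'limit': '3'})
#     3
#     >>> cmdutil.loglimit({}) is None
#     True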
365 365 def makefilename(repo, pat, node, desc=None,
366 366 total=None, seqno=None, revwidth=None, pathname=None):
367 367 node_expander = {
368 368 'H': lambda: hex(node),
369 369 'R': lambda: str(repo.changelog.rev(node)),
370 370 'h': lambda: short(node),
371 371 'm': lambda: re.sub('[^\w]', '_', str(desc))
372 372 }
373 373 expander = {
374 374 '%': lambda: '%',
375 375 'b': lambda: os.path.basename(repo.root),
376 376 }
377 377
378 378 try:
379 379 if node:
380 380 expander.update(node_expander)
381 381 if node:
382 382 expander['r'] = (lambda:
383 383 str(repo.changelog.rev(node)).zfill(revwidth or 0))
384 384 if total is not None:
385 385 expander['N'] = lambda: str(total)
386 386 if seqno is not None:
387 387 expander['n'] = lambda: str(seqno)
388 388 if total is not None and seqno is not None:
389 389 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
390 390 if pathname is not None:
391 391 expander['s'] = lambda: os.path.basename(pathname)
392 392 expander['d'] = lambda: os.path.dirname(pathname) or '.'
393 393 expander['p'] = lambda: pathname
394 394
395 395 newname = []
396 396 patlen = len(pat)
397 397 i = 0
398 398 while i < patlen:
399 399 c = pat[i]
400 400 if c == '%':
401 401 i += 1
402 402 c = pat[i]
403 403 c = expander[c]()
404 404 newname.append(c)
405 405 i += 1
406 406 return ''.join(newname)
407 407 except KeyError, inst:
408 408 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
409 409 inst.args[0])
410 410
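# Editor's note: calling makefilename() needs a live repo, so this sketch
# (not upstream code) only demonstrates the '%n' padding rule used above:
# the sequence number is zero-filled to the width of the total count.
#
#     >>> seqno, total = 3, 120
#     >>> str(seqno).zfill(len(str(total)))
#     '003'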
411 411 def makefileobj(repo, pat, node=None, desc=None, total=None,
412 412 seqno=None, revwidth=None, mode='wb', modemap=None,
413 413 pathname=None):
414 414
415 415 writable = mode not in ('r', 'rb')
416 416
417 417 if not pat or pat == '-':
418 418 if writable:
419 419 fp = repo.ui.fout
420 420 else:
421 421 fp = repo.ui.fin
422 422 if util.safehasattr(fp, 'fileno'):
423 423 return os.fdopen(os.dup(fp.fileno()), mode)
424 424 else:
425 425 # if this fp can't be duped properly, return
426 426 # a dummy object that can be closed
427 427 class wrappedfileobj(object):
428 428 noop = lambda x: None
429 429 def __init__(self, f):
430 430 self.f = f
431 431 def __getattr__(self, attr):
432 432 if attr == 'close':
433 433 return self.noop
434 434 else:
435 435 return getattr(self.f, attr)
436 436
437 437 return wrappedfileobj(fp)
438 438 if util.safehasattr(pat, 'write') and writable:
439 439 return pat
440 440 if util.safehasattr(pat, 'read') and 'r' in mode:
441 441 return pat
442 442 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
443 443 if modemap is not None:
444 444 mode = modemap.get(fn, mode)
445 445 if mode == 'wb':
446 446 modemap[fn] = 'ab'
447 447 return open(fn, mode)
448 448
449 449 def openrevlog(repo, cmd, file_, opts):
450 450 """opens the changelog, manifest, a filelog or a given revlog"""
451 451 cl = opts['changelog']
452 452 mf = opts['manifest']
453 453 msg = None
454 454 if cl and mf:
455 455 msg = _('cannot specify --changelog and --manifest at the same time')
456 456 elif cl or mf:
457 457 if file_:
458 458 msg = _('cannot specify filename with --changelog or --manifest')
459 459 elif not repo:
460 460 msg = _('cannot specify --changelog or --manifest '
461 461 'without a repository')
462 462 if msg:
463 463 raise util.Abort(msg)
464 464
465 465 r = None
466 466 if repo:
467 467 if cl:
468 468 r = repo.unfiltered().changelog
469 469 elif mf:
470 470 r = repo.manifest
471 471 elif file_:
472 472 filelog = repo.file(file_)
473 473 if len(filelog):
474 474 r = filelog
475 475 if not r:
476 476 if not file_:
477 477 raise error.CommandError(cmd, _('invalid arguments'))
478 478 if not os.path.isfile(file_):
479 479 raise util.Abort(_("revlog '%s' not found") % file_)
480 480 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
481 481 file_[:-2] + ".i")
482 482 return r
483 483
484 484 def copy(ui, repo, pats, opts, rename=False):
485 485 # called with the repo lock held
486 486 #
487 487 # hgsep => pathname that uses "/" to separate directories
488 488 # ossep => pathname that uses os.sep to separate directories
489 489 cwd = repo.getcwd()
490 490 targets = {}
491 491 after = opts.get("after")
492 492 dryrun = opts.get("dry_run")
493 493 wctx = repo[None]
494 494
495 495 def walkpat(pat):
496 496 srcs = []
497 497 if after:
498 498 badstates = '?'
499 499 else:
500 500 badstates = '?r'
501 501 m = scmutil.match(repo[None], [pat], opts, globbed=True)
502 502 for abs in repo.walk(m):
503 503 state = repo.dirstate[abs]
504 504 rel = m.rel(abs)
505 505 exact = m.exact(abs)
506 506 if state in badstates:
507 507 if exact and state == '?':
508 508 ui.warn(_('%s: not copying - file is not managed\n') % rel)
509 509 if exact and state == 'r':
510 510 ui.warn(_('%s: not copying - file has been marked for'
511 511 ' remove\n') % rel)
512 512 continue
513 513 # abs: hgsep
514 514 # rel: ossep
515 515 srcs.append((abs, rel, exact))
516 516 return srcs
517 517
518 518 # abssrc: hgsep
519 519 # relsrc: ossep
520 520 # otarget: ossep
521 521 def copyfile(abssrc, relsrc, otarget, exact):
522 522 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
523 523 if '/' in abstarget:
524 524 # We cannot normalize abstarget itself, this would prevent
525 525 # case only renames, like a => A.
526 526 abspath, absname = abstarget.rsplit('/', 1)
527 527 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
528 528 reltarget = repo.pathto(abstarget, cwd)
529 529 target = repo.wjoin(abstarget)
530 530 src = repo.wjoin(abssrc)
531 531 state = repo.dirstate[abstarget]
532 532
533 533 scmutil.checkportable(ui, abstarget)
534 534
535 535 # check for collisions
536 536 prevsrc = targets.get(abstarget)
537 537 if prevsrc is not None:
538 538 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
539 539 (reltarget, repo.pathto(abssrc, cwd),
540 540 repo.pathto(prevsrc, cwd)))
541 541 return
542 542
543 543 # check for overwrites
544 544 exists = os.path.lexists(target)
545 545 samefile = False
546 546 if exists and abssrc != abstarget:
547 547 if (repo.dirstate.normalize(abssrc) ==
548 548 repo.dirstate.normalize(abstarget)):
549 549 if not rename:
550 550 ui.warn(_("%s: can't copy - same file\n") % reltarget)
551 551 return
552 552 exists = False
553 553 samefile = True
554 554
555 555 if not after and exists or after and state in 'mn':
556 556 if not opts['force']:
557 557 ui.warn(_('%s: not overwriting - file exists\n') %
558 558 reltarget)
559 559 return
560 560
561 561 if after:
562 562 if not exists:
563 563 if rename:
564 564 ui.warn(_('%s: not recording move - %s does not exist\n') %
565 565 (relsrc, reltarget))
566 566 else:
567 567 ui.warn(_('%s: not recording copy - %s does not exist\n') %
568 568 (relsrc, reltarget))
569 569 return
570 570 elif not dryrun:
571 571 try:
572 572 if exists:
573 573 os.unlink(target)
574 574 targetdir = os.path.dirname(target) or '.'
575 575 if not os.path.isdir(targetdir):
576 576 os.makedirs(targetdir)
577 577 if samefile:
578 578 tmp = target + "~hgrename"
579 579 os.rename(src, tmp)
580 580 os.rename(tmp, target)
581 581 else:
582 582 util.copyfile(src, target)
583 583 srcexists = True
584 584 except IOError, inst:
585 585 if inst.errno == errno.ENOENT:
586 586 ui.warn(_('%s: deleted in working directory\n') % relsrc)
587 587 srcexists = False
588 588 else:
589 589 ui.warn(_('%s: cannot copy - %s\n') %
590 590 (relsrc, inst.strerror))
591 591 return True # report a failure
592 592
593 593 if ui.verbose or not exact:
594 594 if rename:
595 595 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
596 596 else:
597 597 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
598 598
599 599 targets[abstarget] = abssrc
600 600
601 601 # fix up dirstate
602 602 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
603 603 dryrun=dryrun, cwd=cwd)
604 604 if rename and not dryrun:
605 605 if not after and srcexists and not samefile:
606 606 util.unlinkpath(repo.wjoin(abssrc))
607 607 wctx.forget([abssrc])
608 608
609 609 # pat: ossep
610 610 # dest ossep
611 611 # srcs: list of (hgsep, hgsep, ossep, bool)
612 612 # return: function that takes hgsep and returns ossep
613 613 def targetpathfn(pat, dest, srcs):
614 614 if os.path.isdir(pat):
615 615 abspfx = pathutil.canonpath(repo.root, cwd, pat)
616 616 abspfx = util.localpath(abspfx)
617 617 if destdirexists:
618 618 striplen = len(os.path.split(abspfx)[0])
619 619 else:
620 620 striplen = len(abspfx)
621 621 if striplen:
622 622 striplen += len(os.sep)
623 623 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
624 624 elif destdirexists:
625 625 res = lambda p: os.path.join(dest,
626 626 os.path.basename(util.localpath(p)))
627 627 else:
628 628 res = lambda p: dest
629 629 return res
630 630
631 631 # pat: ossep
632 632 # dest ossep
633 633 # srcs: list of (hgsep, hgsep, ossep, bool)
634 634 # return: function that takes hgsep and returns ossep
635 635 def targetpathafterfn(pat, dest, srcs):
636 636 if matchmod.patkind(pat):
637 637 # a mercurial pattern
638 638 res = lambda p: os.path.join(dest,
639 639 os.path.basename(util.localpath(p)))
640 640 else:
641 641 abspfx = pathutil.canonpath(repo.root, cwd, pat)
642 642 if len(abspfx) < len(srcs[0][0]):
643 643 # A directory. Either the target path contains the last
644 644 # component of the source path or it does not.
645 645 def evalpath(striplen):
646 646 score = 0
647 647 for s in srcs:
648 648 t = os.path.join(dest, util.localpath(s[0])[striplen:])
649 649 if os.path.lexists(t):
650 650 score += 1
651 651 return score
652 652
653 653 abspfx = util.localpath(abspfx)
654 654 striplen = len(abspfx)
655 655 if striplen:
656 656 striplen += len(os.sep)
657 657 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
658 658 score = evalpath(striplen)
659 659 striplen1 = len(os.path.split(abspfx)[0])
660 660 if striplen1:
661 661 striplen1 += len(os.sep)
662 662 if evalpath(striplen1) > score:
663 663 striplen = striplen1
664 664 res = lambda p: os.path.join(dest,
665 665 util.localpath(p)[striplen:])
666 666 else:
667 667 # a file
668 668 if destdirexists:
669 669 res = lambda p: os.path.join(dest,
670 670 os.path.basename(util.localpath(p)))
671 671 else:
672 672 res = lambda p: dest
673 673 return res
674 674
675 675 pats = scmutil.expandpats(pats)
676 676 if not pats:
677 677 raise util.Abort(_('no source or destination specified'))
678 678 if len(pats) == 1:
679 679 raise util.Abort(_('no destination specified'))
680 680 dest = pats.pop()
681 681 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
682 682 if not destdirexists:
683 683 if len(pats) > 1 or matchmod.patkind(pats[0]):
684 684 raise util.Abort(_('with multiple sources, destination must be an '
685 685 'existing directory'))
686 686 if util.endswithsep(dest):
687 687 raise util.Abort(_('destination %s is not a directory') % dest)
688 688
689 689 tfn = targetpathfn
690 690 if after:
691 691 tfn = targetpathafterfn
692 692 copylist = []
693 693 for pat in pats:
694 694 srcs = walkpat(pat)
695 695 if not srcs:
696 696 continue
697 697 copylist.append((tfn(pat, dest, srcs), srcs))
698 698 if not copylist:
699 699 raise util.Abort(_('no files to copy'))
700 700
701 701 errors = 0
702 702 for targetpath, srcs in copylist:
703 703 for abssrc, relsrc, exact in srcs:
704 704 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
705 705 errors += 1
706 706
707 707 if errors:
708 708 ui.warn(_('(consider using --after)\n'))
709 709
710 710 return errors != 0
711 711
712 712 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
713 713 runargs=None, appendpid=False):
714 714 '''Run a command as a service.'''
715 715
716 716 def writepid(pid):
717 717 if opts['pid_file']:
718 718 if appendpid:
719 719 mode = 'a'
720 720 else:
721 721 mode = 'w'
722 722 fp = open(opts['pid_file'], mode)
723 723 fp.write(str(pid) + '\n')
724 724 fp.close()
725 725
726 726 if opts['daemon'] and not opts['daemon_pipefds']:
727 727 # Signal child process startup with file removal
728 728 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
729 729 os.close(lockfd)
730 730 try:
731 731 if not runargs:
732 732 runargs = util.hgcmd() + sys.argv[1:]
733 733 runargs.append('--daemon-pipefds=%s' % lockpath)
734 734 # Don't pass --cwd to the child process, because we've already
735 735 # changed directory.
736 736 for i in xrange(1, len(runargs)):
737 737 if runargs[i].startswith('--cwd='):
738 738 del runargs[i]
739 739 break
740 740 elif runargs[i].startswith('--cwd'):
741 741 del runargs[i:i + 2]
742 742 break
743 743 def condfn():
744 744 return not os.path.exists(lockpath)
745 745 pid = util.rundetached(runargs, condfn)
746 746 if pid < 0:
747 747 raise util.Abort(_('child process failed to start'))
748 748 writepid(pid)
749 749 finally:
750 750 try:
751 751 os.unlink(lockpath)
752 752 except OSError, e:
753 753 if e.errno != errno.ENOENT:
754 754 raise
755 755 if parentfn:
756 756 return parentfn(pid)
757 757 else:
758 758 return
759 759
760 760 if initfn:
761 761 initfn()
762 762
763 763 if not opts['daemon']:
764 764 writepid(os.getpid())
765 765
766 766 if opts['daemon_pipefds']:
767 767 lockpath = opts['daemon_pipefds']
768 768 try:
769 769 os.setsid()
770 770 except AttributeError:
771 771 pass
772 772 os.unlink(lockpath)
773 773 util.hidewindow()
774 774 sys.stdout.flush()
775 775 sys.stderr.flush()
776 776
777 777 nullfd = os.open(os.devnull, os.O_RDWR)
778 778 logfilefd = nullfd
779 779 if logfile:
780 780 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
781 781 os.dup2(nullfd, 0)
782 782 os.dup2(logfilefd, 1)
783 783 os.dup2(logfilefd, 2)
784 784 if nullfd not in (0, 1, 2):
785 785 os.close(nullfd)
786 786 if logfile and logfilefd not in (0, 1, 2):
787 787 os.close(logfilefd)
788 788
789 789 if runfn:
790 790 return runfn()
791 791
792 792 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
793 793 """Utility function used by commands.import to import a single patch
794 794
795 795 This function is explicitly defined here to help the evolve extension to
796 796 wrap this part of the import logic.
797 797
798 798 The API is currently a bit ugly because it is a simple code translation from
799 799 the import command. Feel free to make it better.
800 800
801 801 :hunk: a patch (as a binary string)
802 802 :parents: nodes that will be parent of the created commit
803 803 :opts: the full dict of options passed to the import command
804 804 :msgs: list to save commit message to.
805 805 (used in case we need to save it when failing)
806 806 :updatefunc: a function that updates a repo to a given node
807 807 updatefunc(<repo>, <node>)
808 808 """
809 809 tmpname, message, user, date, branch, nodeid, p1, p2 = \
810 810 patch.extract(ui, hunk)
811 811
812 812 update = not opts.get('bypass')
813 813 strip = opts["strip"]
814 814 prefix = opts["prefix"]
815 815 sim = float(opts.get('similarity') or 0)
816 816 if not tmpname:
817 817 return (None, None, False)
818 818 msg = _('applied to working directory')
819 819
820 820 rejects = False
821 821
822 822 try:
823 823 cmdline_message = logmessage(ui, opts)
824 824 if cmdline_message:
825 825 # pickup the cmdline msg
826 826 message = cmdline_message
827 827 elif message:
828 828 # pickup the patch msg
829 829 message = message.strip()
830 830 else:
831 831 # launch the editor
832 832 message = None
833 833 ui.debug('message:\n%s\n' % message)
834 834
835 835 if len(parents) == 1:
836 836 parents.append(repo[nullid])
837 837 if opts.get('exact'):
838 838 if not nodeid or not p1:
839 839 raise util.Abort(_('not a Mercurial patch'))
840 840 p1 = repo[p1]
841 841 p2 = repo[p2 or nullid]
842 842 elif p2:
843 843 try:
844 844 p1 = repo[p1]
845 845 p2 = repo[p2]
846 846 # Without any options, consider p2 only if the
847 847 # patch is being applied on top of the recorded
848 848 # first parent.
849 849 if p1 != parents[0]:
850 850 p1 = parents[0]
851 851 p2 = repo[nullid]
852 852 except error.RepoError:
853 853 p1, p2 = parents
854 854 if p2.node() == nullid:
855 855 ui.warn(_("warning: import the patch as a normal revision\n"
856 856 "(use --exact to import the patch as a merge)\n"))
857 857 else:
858 858 p1, p2 = parents
859 859
860 860 n = None
861 861 if update:
862 862 repo.dirstate.beginparentchange()
863 863 if p1 != parents[0]:
864 864 updatefunc(repo, p1.node())
865 865 if p2 != parents[1]:
866 866 repo.setparents(p1.node(), p2.node())
867 867
868 868 if opts.get('exact') or opts.get('import_branch'):
869 869 repo.dirstate.setbranch(branch or 'default')
870 870
871 871 partial = opts.get('partial', False)
872 872 files = set()
873 873 try:
874 874 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
875 875 files=files, eolmode=None, similarity=sim / 100.0)
876 876 except patch.PatchError, e:
877 877 if not partial:
878 878 raise util.Abort(str(e))
879 879 if partial:
880 880 rejects = True
881 881
882 882 files = list(files)
883 883 if opts.get('no_commit'):
884 884 if message:
885 885 msgs.append(message)
886 886 else:
887 887 if opts.get('exact') or p2:
888 888 # If you got here, you either used --force and know what
889 889 # you are doing, or used --exact or a merge patch while
890 890 # being updated to its first parent.
891 891 m = None
892 892 else:
893 893 m = scmutil.matchfiles(repo, files or [])
894 894 editform = mergeeditform(repo[None], 'import.normal')
895 895 if opts.get('exact'):
896 896 editor = None
897 897 else:
898 898 editor = getcommiteditor(editform=editform, **opts)
899 899 n = repo.commit(message, opts.get('user') or user,
900 900 opts.get('date') or date, match=m,
901 901 editor=editor, force=partial)
902 902 repo.dirstate.endparentchange()
903 903 else:
904 904 if opts.get('exact') or opts.get('import_branch'):
905 905 branch = branch or 'default'
906 906 else:
907 907 branch = p1.branch()
908 908 store = patch.filestore()
909 909 try:
910 910 files = set()
911 911 try:
912 912 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
913 913 files, eolmode=None)
914 914 except patch.PatchError, e:
915 915 raise util.Abort(str(e))
916 916 if opts.get('exact'):
917 917 editor = None
918 918 else:
919 919 editor = getcommiteditor(editform='import.bypass')
920 920 memctx = context.makememctx(repo, (p1.node(), p2.node()),
921 921 message,
922 922 opts.get('user') or user,
923 923 opts.get('date') or date,
924 924 branch, files, store,
925 925 editor=editor)
926 926 n = memctx.commit()
927 927 finally:
928 928 store.close()
929 929 if opts.get('exact') and opts.get('no_commit'):
930 930 # --exact with --no-commit is still useful in that it does merge
931 931 # and branch bits
932 932 ui.warn(_("warning: can't check exact import with --no-commit\n"))
933 933 elif opts.get('exact') and hex(n) != nodeid:
934 934 raise util.Abort(_('patch is damaged or loses information'))
935 935 if n:
936 936 # i18n: refers to a short changeset id
937 937 msg = _('created %s') % short(n)
938 938 return (msg, n, rejects)
939 939 finally:
940 940 os.unlink(tmpname)
941 941
942 942 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
943 943 opts=None):
944 944 '''export changesets as hg patches.'''
945 945
946 946 total = len(revs)
947 947 revwidth = max([len(str(rev)) for rev in revs])
948 948 filemode = {}
949 949
950 950 def single(rev, seqno, fp):
951 951 ctx = repo[rev]
952 952 node = ctx.node()
953 953 parents = [p.node() for p in ctx.parents() if p]
954 954 branch = ctx.branch()
955 955 if switch_parent:
956 956 parents.reverse()
957 957
958 958 if parents:
959 959 prev = parents[0]
960 960 else:
961 961 prev = nullid
962 962
963 963 shouldclose = False
964 964 if not fp and len(template) > 0:
965 965 desc_lines = ctx.description().rstrip().split('\n')
966 966 desc = desc_lines[0] # Commit always has a first line.
967 967 fp = makefileobj(repo, template, node, desc=desc, total=total,
968 968 seqno=seqno, revwidth=revwidth, mode='wb',
969 969 modemap=filemode)
970 970 if fp != template:
971 971 shouldclose = True
972 972 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
973 973 repo.ui.note("%s\n" % fp.name)
974 974
975 975 if not fp:
976 976 write = repo.ui.write
977 977 else:
978 978 def write(s, **kw):
979 979 fp.write(s)
980 980
981 981 write("# HG changeset patch\n")
982 982 write("# User %s\n" % ctx.user())
983 983 write("# Date %d %d\n" % ctx.date())
984 984 write("# %s\n" % util.datestr(ctx.date()))
985 985 if branch and branch != 'default':
986 986 write("# Branch %s\n" % branch)
987 987 write("# Node ID %s\n" % hex(node))
988 988 write("# Parent %s\n" % hex(prev))
989 989 if len(parents) > 1:
990 990 write("# Parent %s\n" % hex(parents[1]))
991 991 write(ctx.description().rstrip())
992 992 write("\n\n")
993 993
994 994 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
995 995 write(chunk, label=label)
996 996
997 997 if shouldclose:
998 998 fp.close()
999 999
1000 1000 for seqno, rev in enumerate(revs):
1001 1001 single(rev, seqno + 1, fp)
1002 1002
1003 1003 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1004 1004 changes=None, stat=False, fp=None, prefix='',
1005 1005 root='', listsubrepos=False):
1006 1006 '''show diff or diffstat.'''
1007 1007 if fp is None:
1008 1008 write = ui.write
1009 1009 else:
1010 1010 def write(s, **kw):
1011 1011 fp.write(s)
1012 1012
1013 1013 if root:
1014 1014 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1015 1015 else:
1016 1016 relroot = ''
1017 1017 if relroot != '':
1018 1018 # XXX relative roots currently don't work if the root is within a
1019 1019 # subrepo
1020 1020 uirelroot = match.uipath(relroot)
1021 1021 relroot += '/'
1022 1022 for matchroot in match.files():
1023 1023 if not matchroot.startswith(relroot):
1024 1024 ui.warn(_('warning: %s not inside relative root %s\n') % (
1025 1025 match.uipath(matchroot), uirelroot))
1026 1026
1027 1027 if stat:
1028 1028 diffopts = diffopts.copy(context=0)
1029 1029 width = 80
1030 1030 if not ui.plain():
1031 1031 width = ui.termwidth()
1032 1032 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1033 1033 prefix=prefix, relroot=relroot)
1034 1034 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1035 1035 width=width,
1036 1036 git=diffopts.git):
1037 1037 write(chunk, label=label)
1038 1038 else:
1039 1039 for chunk, label in patch.diffui(repo, node1, node2, match,
1040 1040 changes, diffopts, prefix=prefix,
1041 1041 relroot=relroot):
1042 1042 write(chunk, label=label)
1043 1043
1044 1044 if listsubrepos:
1045 1045 ctx1 = repo[node1]
1046 1046 ctx2 = repo[node2]
1047 1047 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1048 1048 tempnode2 = node2
1049 1049 try:
1050 1050 if node2 is not None:
1051 1051 tempnode2 = ctx2.substate[subpath][1]
1052 1052 except KeyError:
1053 1053 # A subrepo that existed in node1 was deleted between node1 and
1054 1054 # node2 (inclusive). Thus, ctx2's substate won't contain that
1055 1055 # subpath. The best we can do is to ignore it.
1056 1056 tempnode2 = None
1057 1057 submatch = matchmod.narrowmatcher(subpath, match)
1058 1058 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1059 1059 stat=stat, fp=fp, prefix=prefix)
1060 1060
1061 1061 class changeset_printer(object):
1062 1062 '''show changeset information when templating is not requested.'''
1063 1063
1064 1064 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1065 1065 self.ui = ui
1066 1066 self.repo = repo
1067 1067 self.buffered = buffered
1068 1068 self.matchfn = matchfn
1069 1069 self.diffopts = diffopts
1070 1070 self.header = {}
1071 1071 self.hunk = {}
1072 1072 self.lastheader = None
1073 1073 self.footer = None
1074 1074
1075 1075 def flush(self, rev):
1076 1076 if rev in self.header:
1077 1077 h = self.header[rev]
1078 1078 if h != self.lastheader:
1079 1079 self.lastheader = h
1080 1080 self.ui.write(h)
1081 1081 del self.header[rev]
1082 1082 if rev in self.hunk:
1083 1083 self.ui.write(self.hunk[rev])
1084 1084 del self.hunk[rev]
1085 1085 return 1
1086 1086 return 0
1087 1087
1088 1088 def close(self):
1089 1089 if self.footer:
1090 1090 self.ui.write(self.footer)
1091 1091
1092 1092 def show(self, ctx, copies=None, matchfn=None, **props):
1093 1093 if self.buffered:
1094 1094 self.ui.pushbuffer()
1095 1095 self._show(ctx, copies, matchfn, props)
1096 1096 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1097 1097 else:
1098 1098 self._show(ctx, copies, matchfn, props)
1099 1099
1100 1100 def _show(self, ctx, copies, matchfn, props):
1101 1101 '''show a single changeset or file revision'''
1102 1102 changenode = ctx.node()
1103 1103 rev = ctx.rev()
1104 1104
1105 1105 if self.ui.quiet:
1106 1106 self.ui.write("%d:%s\n" % (rev, short(changenode)),
1107 1107 label='log.node')
1108 1108 return
1109 1109
1110 1110 date = util.datestr(ctx.date())
1111 1111
1112 1112 if self.ui.debugflag:
1113 1113 hexfunc = hex
1114 1114 else:
1115 1115 hexfunc = short
1116 1116
1117 1117 # i18n: column positioning for "hg log"
1118 1118 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
1119 1119 label='log.changeset changeset.%s' % ctx.phasestr())
1120 1120
1121 1121 # branches are shown first before any other names due to backwards
1122 1122 # compatibility
1123 1123 branch = ctx.branch()
1124 1124 # don't show the default branch name
1125 1125 if branch != 'default':
1126 1126 # i18n: column positioning for "hg log"
1127 1127 self.ui.write(_("branch: %s\n") % branch,
1128 1128 label='log.branch')
1129 1129
1130 1130 for name, ns in self.repo.names.iteritems():
1131 1131 # branches has special logic already handled above, so here we just
1132 1132 # skip it
1133 1133 if name == 'branches':
1134 1134 continue
1135 1135 # we will use the templatename as the color name since those two
1136 1136 # should be the same
1137 1137 for name in ns.names(self.repo, changenode):
1138 1138 self.ui.write(ns.logfmt % name,
1139 1139 label='log.%s' % ns.colorname)
1140 1140 if self.ui.debugflag:
1141 1141 # i18n: column positioning for "hg log"
1142 1142 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
1143 1143 label='log.phase')
1144 1144 for pctx in self._meaningful_parentrevs(ctx):
1145 1145 label = 'log.parent changeset.%s' % pctx.phasestr()
1146 1146 # i18n: column positioning for "hg log"
1147 1147 self.ui.write(_("parent: %d:%s\n")
1148 1148 % (pctx.rev(), hexfunc(pctx.node())),
1149 1149 label=label)
1150 1150
1151 1151 if self.ui.debugflag:
1152 1152 mnode = ctx.manifestnode()
1153 1153 # i18n: column positioning for "hg log"
1154 1154 self.ui.write(_("manifest: %d:%s\n") %
1155 1155 (self.repo.manifest.rev(mnode), hex(mnode)),
1156 1156 label='ui.debug log.manifest')
1157 1157 # i18n: column positioning for "hg log"
1158 1158 self.ui.write(_("user: %s\n") % ctx.user(),
1159 1159 label='log.user')
1160 1160 # i18n: column positioning for "hg log"
1161 1161 self.ui.write(_("date: %s\n") % date,
1162 1162 label='log.date')
1163 1163
1164 1164 if self.ui.debugflag:
1165 1165 files = ctx.p1().status(ctx)[:3]
1166 1166 for key, value in zip([# i18n: column positioning for "hg log"
1167 1167 _("files:"),
1168 1168 # i18n: column positioning for "hg log"
1169 1169 _("files+:"),
1170 1170 # i18n: column positioning for "hg log"
1171 1171 _("files-:")], files):
1172 1172 if value:
1173 1173 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1174 1174 label='ui.debug log.files')
1175 1175 elif ctx.files() and self.ui.verbose:
1176 1176 # i18n: column positioning for "hg log"
1177 1177 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1178 1178 label='ui.note log.files')
1179 1179 if copies and self.ui.verbose:
1180 1180 copies = ['%s (%s)' % c for c in copies]
1181 1181 # i18n: column positioning for "hg log"
1182 1182 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1183 1183 label='ui.note log.copies')
1184 1184
1185 1185 extra = ctx.extra()
1186 1186 if extra and self.ui.debugflag:
1187 1187 for key, value in sorted(extra.items()):
1188 1188 # i18n: column positioning for "hg log"
1189 1189 self.ui.write(_("extra: %s=%s\n")
1190 1190 % (key, value.encode('string_escape')),
1191 1191 label='ui.debug log.extra')
1192 1192
1193 1193 description = ctx.description().strip()
1194 1194 if description:
1195 1195 if self.ui.verbose:
1196 1196 self.ui.write(_("description:\n"),
1197 1197 label='ui.note log.description')
1198 1198 self.ui.write(description,
1199 1199 label='ui.note log.description')
1200 1200 self.ui.write("\n\n")
1201 1201 else:
1202 1202 # i18n: column positioning for "hg log"
1203 1203 self.ui.write(_("summary: %s\n") %
1204 1204 description.splitlines()[0],
1205 1205 label='log.summary')
1206 1206 self.ui.write("\n")
1207 1207
1208 1208 self.showpatch(changenode, matchfn)
1209 1209
1210 1210 def showpatch(self, node, matchfn):
1211 1211 if not matchfn:
1212 1212 matchfn = self.matchfn
1213 1213 if matchfn:
1214 1214 stat = self.diffopts.get('stat')
1215 1215 diff = self.diffopts.get('patch')
1216 1216 diffopts = patch.diffallopts(self.ui, self.diffopts)
1217 1217 prev = self.repo.changelog.parents(node)[0]
1218 1218 if stat:
1219 1219 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1220 1220 match=matchfn, stat=True)
1221 1221 if diff:
1222 1222 if stat:
1223 1223 self.ui.write("\n")
1224 1224 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1225 1225 match=matchfn, stat=False)
1226 1226 self.ui.write("\n")
1227 1227
1228 1228 def _meaningful_parentrevs(self, ctx):
1229 1229 """Return list of meaningful (or all if debug) parentrevs for rev.
1230 1230
1231 1231 For merges (two non-nullrev revisions) both parents are meaningful.
1232 1232 Otherwise the first parent revision is considered meaningful if it
1233 1233 is not the preceding revision.
1234 1234 """
1235 1235 parents = ctx.parents()
1236 1236 if len(parents) > 1:
1237 1237 return parents
1238 1238 if self.ui.debugflag:
1239 1239 return [parents[0], self.repo['null']]
1240 1240 if parents[0].rev() >= ctx.rev() - 1:
1241 1241 return []
1242 1242 return parents
1243 1243
1244 1244 class jsonchangeset(changeset_printer):
1245 1245 '''format changeset information.'''
1246 1246
1247 1247 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1248 1248 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1249 1249 self.cache = {}
1250 1250 self._first = True
1251 1251
1252 1252 def close(self):
1253 1253 if not self._first:
1254 1254 self.ui.write("\n]\n")
1255 1255 else:
1256 1256 self.ui.write("[]\n")
1257 1257
1258 1258 def _show(self, ctx, copies, matchfn, props):
1259 1259 '''show a single changeset or file revision'''
1260 1260 hexnode = hex(ctx.node())
1261 1261 rev = ctx.rev()
1262 1262 j = encoding.jsonescape
1263 1263
1264 1264 if self._first:
1265 1265 self.ui.write("[\n {")
1266 1266 self._first = False
1267 1267 else:
1268 1268 self.ui.write(",\n {")
1269 1269
1270 1270 if self.ui.quiet:
1271 1271 self.ui.write('\n "rev": %d' % rev)
1272 1272 self.ui.write(',\n "node": "%s"' % hexnode)
1273 1273 self.ui.write('\n }')
1274 1274 return
1275 1275
1276 1276 self.ui.write('\n "rev": %d' % rev)
1277 1277 self.ui.write(',\n "node": "%s"' % hexnode)
1278 1278 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1279 1279 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1280 1280 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1281 1281 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1282 1282 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1283 1283
1284 1284 self.ui.write(',\n "bookmarks": [%s]' %
1285 1285 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1286 1286 self.ui.write(',\n "tags": [%s]' %
1287 1287 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1288 1288 self.ui.write(',\n "parents": [%s]' %
1289 1289 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1290 1290
1291 1291 if self.ui.debugflag:
1292 1292 self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))
1293 1293
1294 1294 self.ui.write(',\n "extra": {%s}' %
1295 1295 ", ".join('"%s": "%s"' % (j(k), j(v))
1296 1296 for k, v in ctx.extra().items()))
1297 1297
1298 1298 files = ctx.p1().status(ctx)
1299 1299 self.ui.write(',\n "modified": [%s]' %
1300 1300 ", ".join('"%s"' % j(f) for f in files[0]))
1301 1301 self.ui.write(',\n "added": [%s]' %
1302 1302 ", ".join('"%s"' % j(f) for f in files[1]))
1303 1303 self.ui.write(',\n "removed": [%s]' %
1304 1304 ", ".join('"%s"' % j(f) for f in files[2]))
1305 1305
1306 1306 elif self.ui.verbose:
1307 1307 self.ui.write(',\n "files": [%s]' %
1308 1308 ", ".join('"%s"' % j(f) for f in ctx.files()))
1309 1309
1310 1310 if copies:
1311 1311 self.ui.write(',\n "copies": {%s}' %
1312 1312 ", ".join('"%s": "%s"' % (j(k), j(v))
1313 1313 for k, v in copies))
1314 1314
1315 1315 matchfn = self.matchfn
1316 1316 if matchfn:
1317 1317 stat = self.diffopts.get('stat')
1318 1318 diff = self.diffopts.get('patch')
1319 1319 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1320 1320 node, prev = ctx.node(), ctx.p1().node()
1321 1321 if stat:
1322 1322 self.ui.pushbuffer()
1323 1323 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1324 1324 match=matchfn, stat=True)
1325 1325 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1326 1326 if diff:
1327 1327 self.ui.pushbuffer()
1328 1328 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1329 1329 match=matchfn, stat=False)
1330 1330 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1331 1331
1332 1332 self.ui.write("\n }")
1333 1333
1334 1334 class changeset_templater(changeset_printer):
1335 1335 '''format changeset information.'''
1336 1336
1337 1337 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1338 1338 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1339 1339 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1340 1340 defaulttempl = {
1341 1341 'parent': '{rev}:{node|formatnode} ',
1342 1342 'manifest': '{rev}:{node|formatnode}',
1343 1343 'file_copy': '{name} ({source})',
1344 1344 'extra': '{key}={value|stringescape}'
1345 1345 }
1346 1346 # filecopy is preserved for compatibility reasons
1347 1347 defaulttempl['filecopy'] = defaulttempl['file_copy']
1348 1348 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1349 1349 cache=defaulttempl)
1350 1350 if tmpl:
1351 1351 self.t.cache['changeset'] = tmpl
1352 1352
1353 1353 self.cache = {}
1354 1354
1355 1355 def _show(self, ctx, copies, matchfn, props):
1356 1356 '''show a single changeset or file revision'''
1357 1357
1358 1358 showlist = templatekw.showlist
1359 1359
1360 1360 # showparents() behaviour depends on ui trace level which
1361 1361 # causes unexpected behaviours at templating level and makes
1362 1362 # it harder to extract it into a standalone function. Its
1363 1363 # behaviour cannot be changed so leave it here for now.
1364 1364 def showparents(**args):
1365 1365 ctx = args['ctx']
1366 1366 parents = [[('rev', p.rev()),
1367 1367 ('node', p.hex()),
1368 1368 ('phase', p.phasestr())]
1369 1369 for p in self._meaningful_parentrevs(ctx)]
1370 1370 return showlist('parent', parents, **args)
1371 1371
1372 1372 props = props.copy()
1373 1373 props.update(templatekw.keywords)
1374 1374 props['parents'] = showparents
1375 1375 props['templ'] = self.t
1376 1376 props['ctx'] = ctx
1377 1377 props['repo'] = self.repo
1378 1378 props['revcache'] = {'copies': copies}
1379 1379 props['cache'] = self.cache
1380 1380
1381 1381 # find correct templates for current mode
1382 1382
1383 1383 tmplmodes = [
1384 1384 (True, None),
1385 1385 (self.ui.verbose, 'verbose'),
1386 1386 (self.ui.quiet, 'quiet'),
1387 1387 (self.ui.debugflag, 'debug'),
1388 1388 ]
1389 1389
1390 1390 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1391 1391 for mode, postfix in tmplmodes:
1392 1392 for type in types:
1393 1393 cur = postfix and ('%s_%s' % (type, postfix)) or type
1394 1394 if mode and cur in self.t:
1395 1395 types[type] = cur
1396 1396
1397 1397 try:
1398 1398
1399 1399 # write header
1400 1400 if types['header']:
1401 1401 h = templater.stringify(self.t(types['header'], **props))
1402 1402 if self.buffered:
1403 1403 self.header[ctx.rev()] = h
1404 1404 else:
1405 1405 if self.lastheader != h:
1406 1406 self.lastheader = h
1407 1407 self.ui.write(h)
1408 1408
1409 1409 # write changeset metadata, then patch if requested
1410 1410 key = types['changeset']
1411 1411 self.ui.write(templater.stringify(self.t(key, **props)))
1412 1412 self.showpatch(ctx.node(), matchfn)
1413 1413
1414 1414 if types['footer']:
1415 1415 if not self.footer:
1416 1416 self.footer = templater.stringify(self.t(types['footer'],
1417 1417 **props))
1418 1418
1419 1419 except KeyError, inst:
1420 1420 msg = _("%s: no key named '%s'")
1421 1421 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1422 1422 except SyntaxError, inst:
1423 1423 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1424 1424
1425 1425 def gettemplate(ui, tmpl, style):
1426 1426 """
1427 1427 Find the template matching the given template spec or style.
1428 1428 """
1429 1429
1430 1430 # ui settings
1431 1431 if not tmpl and not style: # templates are stronger than style
1432 1432 tmpl = ui.config('ui', 'logtemplate')
1433 1433 if tmpl:
1434 1434 try:
1435 1435 tmpl = templater.parsestring(tmpl)
1436 1436 except SyntaxError:
1437 1437 tmpl = templater.parsestring(tmpl, quoted=False)
1438 1438 return tmpl, None
1439 1439 else:
1440 1440 style = util.expandpath(ui.config('ui', 'style', ''))
1441 1441
1442 1442 if not tmpl and style:
1443 1443 mapfile = style
1444 1444 if not os.path.split(mapfile)[0]:
1445 1445 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1446 1446 or templater.templatepath(mapfile))
1447 1447 if mapname:
1448 1448 mapfile = mapname
1449 1449 return None, mapfile
1450 1450
1451 1451 if not tmpl:
1452 1452 return None, None
1453 1453
1454 1454 # looks like a literal template?
1455 1455 if '{' in tmpl:
1456 1456 return tmpl, None
1457 1457
1458 1458 # perhaps a stock style?
1459 1459 if not os.path.split(tmpl)[0]:
1460 1460 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1461 1461 or templater.templatepath(tmpl))
1462 1462 if mapname and os.path.isfile(mapname):
1463 1463 return None, mapname
1464 1464
1465 1465 # perhaps it's a reference to [templates]
1466 1466 t = ui.config('templates', tmpl)
1467 1467 if t:
1468 1468 try:
1469 1469 tmpl = templater.parsestring(t)
1470 1470 except SyntaxError:
1471 1471 tmpl = templater.parsestring(t, quoted=False)
1472 1472 return tmpl, None
1473 1473
1474 1474 if tmpl == 'list':
1475 1475 ui.write(_("available styles: %s\n") % templater.stylelist())
1476 1476 raise util.Abort(_("specify a template"))
1477 1477
1478 1478 # perhaps it's a path to a map or a template
1479 1479 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1480 1480 # is it a mapfile for a style?
1481 1481 if os.path.basename(tmpl).startswith("map-"):
1482 1482 return None, os.path.realpath(tmpl)
1483 1483 tmpl = open(tmpl).read()
1484 1484 return tmpl, None
1485 1485
1486 1486 # constant string?
1487 1487 return tmpl, None
1488 1488
1489 1489 def show_changeset(ui, repo, opts, buffered=False):
1490 1490 """show one changeset using template or regular display.
1491 1491
1492 1492 Display format will be the first non-empty hit of:
1493 1493 1. option 'template'
1494 1494 2. option 'style'
1495 1495 3. [ui] setting 'logtemplate'
1496 1496 4. [ui] setting 'style'
1497 1497 If all of these values are either unset or the empty string,
1498 1498 regular display via changeset_printer() is done.
1499 1499 """
1500 1500 # options
1501 1501 matchfn = None
1502 1502 if opts.get('patch') or opts.get('stat'):
1503 1503 matchfn = scmutil.matchall(repo)
1504 1504
1505 1505 if opts.get('template') == 'json':
1506 1506 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1507 1507
1508 1508 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1509 1509
1510 1510 if not tmpl and not mapfile:
1511 1511 return changeset_printer(ui, repo, matchfn, opts, buffered)
1512 1512
1513 1513 try:
1514 1514 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1515 1515 buffered)
1516 1516 except SyntaxError, inst:
1517 1517 raise util.Abort(inst.args[0])
1518 1518 return t
1519 1519
1520 1520 def showmarker(ui, marker):
1521 1521 """utility function to display obsolescence marker in a readable way
1522 1522
1523 1523 To be used by debug function."""
1524 1524 ui.write(hex(marker.precnode()))
1525 1525 for repl in marker.succnodes():
1526 1526 ui.write(' ')
1527 1527 ui.write(hex(repl))
1528 1528 ui.write(' %X ' % marker.flags())
1529 1529 parents = marker.parentnodes()
1530 1530 if parents is not None:
1531 1531 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1532 1532 ui.write('(%s) ' % util.datestr(marker.date()))
1533 1533 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1534 1534 sorted(marker.metadata().items())
1535 1535 if t[0] != 'date')))
1536 1536 ui.write('\n')
1537 1537
1538 1538 def finddate(ui, repo, date):
1539 1539 """Find the tipmost changeset that matches the given date spec"""
1540 1540
1541 1541 df = util.matchdate(date)
1542 1542 m = scmutil.matchall(repo)
1543 1543 results = {}
1544 1544
1545 1545 def prep(ctx, fns):
1546 1546 d = ctx.date()
1547 1547 if df(d[0]):
1548 1548 results[ctx.rev()] = d
1549 1549
1550 1550 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1551 1551 rev = ctx.rev()
1552 1552 if rev in results:
1553 1553 ui.status(_("found revision %s from %s\n") %
1554 1554 (rev, util.datestr(results[rev])))
1555 1555 return str(rev)
1556 1556
1557 1557 raise util.Abort(_("revision matching date not found"))
1558 1558
1559 1559 def increasingwindows(windowsize=8, sizelimit=512):
1560 1560 while True:
1561 1561 yield windowsize
1562 1562 if windowsize < sizelimit:
1563 1563 windowsize *= 2
1564 1564
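# Editor's note: a doctest-style sketch (not upstream code) of the window
# sizes yielded above -- they double until the size limit is reached and
# then stay capped at that limit:
#
#     >>> import itertools
#     >>> from mercurial import cmdutil
#     >>> list(itertools.islice(cmdutil.increasingwindows(), 9))
#     [8, 16, 32, 64, 128, 256, 512, 512, 512]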
1565 1565 class FileWalkError(Exception):
1566 1566 pass
1567 1567
1568 1568 def walkfilerevs(repo, match, follow, revs, fncache):
1569 1569 '''Walks the file history for the matched files.
1570 1570
1571 1571 Returns the changeset revs that are involved in the file history.
1572 1572
1573 1573 Throws FileWalkError if the file history can't be walked using
1574 1574 filelogs alone.
1575 1575 '''
1576 1576 wanted = set()
1577 1577 copies = []
1578 1578 minrev, maxrev = min(revs), max(revs)
1579 1579 def filerevgen(filelog, last):
1580 1580 """
1581 1581 Only files, no patterns. Check the history of each file.
1582 1582
1583 1583 Examines filelog entries within the minrev/maxrev linkrev range and
1584 1584 returns an iterator yielding (linkrev, parentlinkrevs, copied)
1585 1585 tuples in backwards order.
1586 1586 """
1587 1587 cl_count = len(repo)
1588 1588 revs = []
1589 1589 for j in xrange(0, last + 1):
1590 1590 linkrev = filelog.linkrev(j)
1591 1591 if linkrev < minrev:
1592 1592 continue
1593 1593 # only yield revs for which we have the changelog; this can
1594 1594 # happen while doing "hg log" during a pull or commit
1595 1595 if linkrev >= cl_count:
1596 1596 break
1597 1597
1598 1598 parentlinkrevs = []
1599 1599 for p in filelog.parentrevs(j):
1600 1600 if p != nullrev:
1601 1601 parentlinkrevs.append(filelog.linkrev(p))
1602 1602 n = filelog.node(j)
1603 1603 revs.append((linkrev, parentlinkrevs,
1604 1604 follow and filelog.renamed(n)))
1605 1605
1606 1606 return reversed(revs)
1607 1607 def iterfiles():
1608 1608 pctx = repo['.']
1609 1609 for filename in match.files():
1610 1610 if follow:
1611 1611 if filename not in pctx:
1612 1612 raise util.Abort(_('cannot follow file not in parent '
1613 1613 'revision: "%s"') % filename)
1614 1614 yield filename, pctx[filename].filenode()
1615 1615 else:
1616 1616 yield filename, None
1617 1617 for filename_node in copies:
1618 1618 yield filename_node
1619 1619
1620 1620 for file_, node in iterfiles():
1621 1621 filelog = repo.file(file_)
1622 1622 if not len(filelog):
1623 1623 if node is None:
1624 1624 # A zero count may be a directory or deleted file, so
1625 1625 # try to find matching entries on the slow path.
1626 1626 if follow:
1627 1627 raise util.Abort(
1628 1628 _('cannot follow nonexistent file: "%s"') % file_)
1629 1629 raise FileWalkError("Cannot walk via filelog")
1630 1630 else:
1631 1631 continue
1632 1632
1633 1633 if node is None:
1634 1634 last = len(filelog) - 1
1635 1635 else:
1636 1636 last = filelog.rev(node)
1637 1637
1638 1638 # keep track of all ancestors of the file
1639 1639 ancestors = set([filelog.linkrev(last)])
1640 1640
1641 1641 # iterate from latest to oldest revision
1642 1642 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1643 1643 if not follow:
1644 1644 if rev > maxrev:
1645 1645 continue
1646 1646 else:
1647 1647 # Note that last might not be the first interesting
1648 1648 # rev to us:
1649 1649 # if the file has been changed after maxrev, we'll
1650 1650 # have linkrev(last) > maxrev, and we still need
1651 1651 # to explore the file graph
1652 1652 if rev not in ancestors:
1653 1653 continue
1654 1654 # XXX insert 1327 fix here
1655 1655 if flparentlinkrevs:
1656 1656 ancestors.update(flparentlinkrevs)
1657 1657
1658 1658 fncache.setdefault(rev, []).append(file_)
1659 1659 wanted.add(rev)
1660 1660 if copied:
1661 1661 copies.append(copied)
1662 1662
1663 1663 return wanted
1664 1664
1665 1665 class _followfilter(object):
1666 1666 def __init__(self, repo, onlyfirst=False):
1667 1667 self.repo = repo
1668 1668 self.startrev = nullrev
1669 1669 self.roots = set()
1670 1670 self.onlyfirst = onlyfirst
1671 1671
1672 1672 def match(self, rev):
1673 1673 def realparents(rev):
1674 1674 if self.onlyfirst:
1675 1675 return self.repo.changelog.parentrevs(rev)[0:1]
1676 1676 else:
1677 1677 return filter(lambda x: x != nullrev,
1678 1678 self.repo.changelog.parentrevs(rev))
1679 1679
1680 1680 if self.startrev == nullrev:
1681 1681 self.startrev = rev
1682 1682 return True
1683 1683
1684 1684 if rev > self.startrev:
1685 1685 # forward: all descendants
1686 1686 if not self.roots:
1687 1687 self.roots.add(self.startrev)
1688 1688 for parent in realparents(rev):
1689 1689 if parent in self.roots:
1690 1690 self.roots.add(rev)
1691 1691 return True
1692 1692 else:
1693 1693 # backwards: all parents
1694 1694 if not self.roots:
1695 1695 self.roots.update(realparents(self.startrev))
1696 1696 if rev in self.roots:
1697 1697 self.roots.remove(rev)
1698 1698 self.roots.update(realparents(rev))
1699 1699 return True
1700 1700
1701 1701 return False
1702 1702
1703 1703 def walkchangerevs(repo, match, opts, prepare):
1704 1704 '''Iterate over files and the revs in which they changed.
1705 1705
1706 1706 Callers most commonly need to iterate backwards over the history
1707 1707 in which they are interested. Doing so has awful (quadratic-looking)
1708 1708 performance, so we use iterators in a "windowed" way.
1709 1709
1710 1710 We walk a window of revisions in the desired order. Within the
1711 1711 window, we first walk forwards to gather data, then in the desired
1712 1712 order (usually backwards) to display it.
1713 1713
1714 1714 This function returns an iterator yielding contexts. Before
1715 1715 yielding each context, the iterator will first call the prepare
1716 1716 function on each context in the window in forward order.'''
1717 1717
1718 1718 follow = opts.get('follow') or opts.get('follow_first')
1719 1719 revs = _logrevs(repo, opts)
1720 1720 if not revs:
1721 1721 return []
1722 1722 wanted = set()
1723 1723 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1724 1724 fncache = {}
1725 1725 change = repo.changectx
1726 1726
1727 1727 # First step is to fill wanted, the set of revisions that we want to yield.
1728 1728 # When it does not induce extra cost, we also fill fncache for revisions in
1729 1729 # wanted: a cache of filenames that were changed (ctx.files()) and that
1730 1730 # match the file filtering conditions.
1731 1731
1732 1732 if match.always():
1733 1733 # No files, no patterns. Display all revs.
1734 1734 wanted = revs
1735 1735
1736 1736 if not slowpath and match.files():
1737 1737 # We only have to read through the filelog to find wanted revisions
1738 1738
1739 1739 try:
1740 1740 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1741 1741 except FileWalkError:
1742 1742 slowpath = True
1743 1743
1744 1744 # We decided to fall back to the slowpath because at least one
1745 1745 # of the paths was not a file. Check to see if at least one of them
1746 1746 # existed in history, otherwise simply return
1747 1747 for path in match.files():
1748 1748 if path == '.' or path in repo.store:
1749 1749 break
1750 1750 else:
1751 1751 return []
1752 1752
1753 1753 if slowpath:
1754 1754 # We have to read the changelog to match filenames against
1755 1755 # changed files
1756 1756
1757 1757 if follow:
1758 1758 raise util.Abort(_('can only follow copies/renames for explicit '
1759 1759 'filenames'))
1760 1760
1761 1761 # The slow path checks files modified in every changeset.
1762 1762 # This is really slow on large repos, so compute the set lazily.
1763 1763 class lazywantedset(object):
1764 1764 def __init__(self):
1765 1765 self.set = set()
1766 1766 self.revs = set(revs)
1767 1767
1768 1768 # No need to worry about locality here because it will be accessed
1769 1769 # in the same order as the increasing window below.
1770 1770 def __contains__(self, value):
1771 1771 if value in self.set:
1772 1772 return True
1773 1773 elif not value in self.revs:
1774 1774 return False
1775 1775 else:
1776 1776 self.revs.discard(value)
1777 1777 ctx = change(value)
1778 1778 matches = filter(match, ctx.files())
1779 1779 if matches:
1780 1780 fncache[value] = matches
1781 1781 self.set.add(value)
1782 1782 return True
1783 1783 return False
1784 1784
1785 1785 def discard(self, value):
1786 1786 self.revs.discard(value)
1787 1787 self.set.discard(value)
1788 1788
1789 1789 wanted = lazywantedset()
1790 1790
1791 1791 # it might be worthwhile to do this in the iterator if the rev range
1792 1792 # is descending and the prune args are all within that range
1793 1793 for rev in opts.get('prune', ()):
1794 1794 rev = repo[rev].rev()
1795 1795 ff = _followfilter(repo)
1796 1796 stop = min(revs[0], revs[-1])
1797 1797 for x in xrange(rev, stop - 1, -1):
1798 1798 if ff.match(x):
1799 1799 wanted = wanted - [x]
1800 1800
1801 1801 # Now that wanted is correctly initialized, we can iterate over the
1802 1802 # revision range, yielding only revisions in wanted.
1803 1803 def iterate():
1804 1804 if follow and not match.files():
1805 1805 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1806 1806 def want(rev):
1807 1807 return ff.match(rev) and rev in wanted
1808 1808 else:
1809 1809 def want(rev):
1810 1810 return rev in wanted
1811 1811
1812 1812 it = iter(revs)
1813 1813 stopiteration = False
1814 1814 for windowsize in increasingwindows():
1815 1815 nrevs = []
1816 1816 for i in xrange(windowsize):
1817 1817 try:
1818 1818 rev = it.next()
1819 1819 if want(rev):
1820 1820 nrevs.append(rev)
1821 1821 except (StopIteration):
1822 1822 stopiteration = True
1823 1823 break
1824 1824 for rev in sorted(nrevs):
1825 1825 fns = fncache.get(rev)
1826 1826 ctx = change(rev)
1827 1827 if not fns:
1828 1828 def fns_generator():
1829 1829 for f in ctx.files():
1830 1830 if match(f):
1831 1831 yield f
1832 1832 fns = fns_generator()
1833 1833 prepare(ctx, fns)
1834 1834 for rev in nrevs:
1835 1835 yield change(rev)
1836 1836
1837 1837 if stopiteration:
1838 1838 break
1839 1839
1840 1840 return iterate()
1841 1841
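
As a rough illustration of the windowed walk described in the docstring above, a caller drives walkchangerevs() along these lines (a sketch only; `ui`, `repo` and the `prepare` callback are assumed or hypothetical):

    def prepare(ctx, fns):
        # called for every context in a window, in forward order
        for f in fns:
            ui.note('%s touches %s\n' % (ctx, f))

    m = scmutil.match(repo[None], ['README'], {})
    for ctx in walkchangerevs(repo, m, {}, prepare):
        # contexts arrive in display order (usually newest first)
        ui.write('%d:%s\n' % (ctx.rev(), ctx))
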
1842 1842 def _makefollowlogfilematcher(repo, files, followfirst):
1843 1843 # When displaying a revision with --patch --follow FILE, we have
1844 1844 # to know which file of the revision must be diffed. With
1845 1845 # --follow, we want the names of the ancestors of FILE in the
1846 1846 # revision, stored in "fcache". "fcache" is populated by
1847 1847 # reproducing the graph traversal already done by --follow revset
1848 1848 # and relating linkrevs to file names (which is not "correct" but
1849 1849 # good enough).
1850 1850 fcache = {}
1851 1851 fcacheready = [False]
1852 1852 pctx = repo['.']
1853 1853
1854 1854 def populate():
1855 1855 for fn in files:
1856 1856 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1857 1857 for c in i:
1858 1858 fcache.setdefault(c.linkrev(), set()).add(c.path())
1859 1859
1860 1860 def filematcher(rev):
1861 1861 if not fcacheready[0]:
1862 1862 # Lazy initialization
1863 1863 fcacheready[0] = True
1864 1864 populate()
1865 1865 return scmutil.matchfiles(repo, fcache.get(rev, []))
1866 1866
1867 1867 return filematcher
1868 1868
1869 1869 def _makenofollowlogfilematcher(repo, pats, opts):
1870 1870 '''hook for extensions to override the filematcher for non-follow cases'''
1871 1871 return None
1872 1872
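
Because this is only a hook, an extension that wants a custom matcher for the non-follow --patch/--stat case would typically wrap it. A hypothetical sketch (the extension behaviour and names are invented):

    from mercurial import cmdutil, extensions, scmutil

    def _docsfilematcher(orig, repo, pats, opts):
        # fall back to the default behaviour unless 'docs' was requested
        m = orig(repo, pats, opts)
        if m is None and 'docs' in pats:
            m = lambda rev: scmutil.matchfiles(repo, ['docs'])
        return m

    def extsetup(ui):
        extensions.wrapfunction(cmdutil, '_makenofollowlogfilematcher',
                                _docsfilematcher)
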
1873 1873 def _makelogrevset(repo, pats, opts, revs):
1874 1874 """Return (expr, filematcher) where expr is a revset string built
1875 1875 from log options and file patterns or None. If --stat or --patch
1876 1876 are not passed filematcher is None. Otherwise it is a callable
1877 1877 taking a revision number and returning a match object filtering
1878 1878 the files to be detailed when displaying the revision.
1879 1879 """
1880 1880 opt2revset = {
1881 1881 'no_merges': ('not merge()', None),
1882 1882 'only_merges': ('merge()', None),
1883 1883 '_ancestors': ('ancestors(%(val)s)', None),
1884 1884 '_fancestors': ('_firstancestors(%(val)s)', None),
1885 1885 '_descendants': ('descendants(%(val)s)', None),
1886 1886 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1887 1887 '_matchfiles': ('_matchfiles(%(val)s)', None),
1888 1888 'date': ('date(%(val)r)', None),
1889 1889 'branch': ('branch(%(val)r)', ' or '),
1890 1890 '_patslog': ('filelog(%(val)r)', ' or '),
1891 1891 '_patsfollow': ('follow(%(val)r)', ' or '),
1892 1892 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1893 1893 'keyword': ('keyword(%(val)r)', ' or '),
1894 1894 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1895 1895 'user': ('user(%(val)r)', ' or '),
1896 1896 }
1897 1897
1898 1898 opts = dict(opts)
1899 1899 # follow or not follow?
1900 1900 follow = opts.get('follow') or opts.get('follow_first')
1901 1901 if opts.get('follow_first'):
1902 1902 followfirst = 1
1903 1903 else:
1904 1904 followfirst = 0
1905 1905 # --follow with FILE behaviour depends on revs...
1906 1906 it = iter(revs)
1907 1907 startrev = it.next()
1908 1908 try:
1909 1909 followdescendants = startrev < it.next()
1910 1910 except (StopIteration):
1911 1911 followdescendants = False
1912 1912
1913 1913 # branch and only_branch are really aliases and must be handled at
1914 1914 # the same time
1915 1915 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1916 1916 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1917 1917 # pats/include/exclude are passed to match.match() directly in
1918 1918 # _matchfiles() revset but walkchangerevs() builds its matcher with
1919 1919 # scmutil.match(). The difference is input pats are globbed on
1920 1920 # platforms without shell expansion (windows).
1921 pctx = repo[None]
1922 match, pats = scmutil.matchandpats(pctx, pats, opts)
1921 wctx = repo[None]
1922 match, pats = scmutil.matchandpats(wctx, pats, opts)
1923 1923 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1924 1924 if not slowpath:
1925 1925 for f in match.files():
1926 if follow and f not in pctx:
1926 if follow and f not in wctx:
1927 1927 # If the file exists, it may be a directory, so let it
1928 1928 # take the slow path.
1929 1929 if os.path.exists(repo.wjoin(f)):
1930 1930 slowpath = True
1931 1931 continue
1932 1932 else:
1933 1933 raise util.Abort(_('cannot follow file not in parent '
1934 1934 'revision: "%s"') % f)
1935 1935 filelog = repo.file(f)
1936 1936 if not filelog:
1937 1937 # A zero count may be a directory or deleted file, so
1938 1938 # try to find matching entries on the slow path.
1939 1939 if follow:
1940 1940 raise util.Abort(
1941 1941 _('cannot follow nonexistent file: "%s"') % f)
1942 1942 slowpath = True
1943 1943
1944 1944 # We decided to fall back to the slowpath because at least one
1945 1945 # of the paths was not a file. Check to see if at least one of them
1946 1946 # existed in history - in that case, we'll continue down the
1947 1947 # slowpath; otherwise, we can turn off the slowpath
1948 1948 if slowpath:
1949 1949 for path in match.files():
1950 1950 if path == '.' or path in repo.store:
1951 1951 break
1952 1952 else:
1953 1953 slowpath = False
1954 1954
1955 1955 fpats = ('_patsfollow', '_patsfollowfirst')
1956 1956 fnopats = (('_ancestors', '_fancestors'),
1957 1957 ('_descendants', '_fdescendants'))
1958 1958 if slowpath:
1959 1959 # See walkchangerevs() slow path.
1960 1960 #
1961 1961 # pats/include/exclude cannot be represented as separate
1962 1962 # revset expressions as their filtering logic applies at file
1963 1963 # level. For instance "-I a -X b" matches a revision touching
1964 1964 # "a" and "b" while "file(a) and not file(b)" does
1965 1965 # not. Besides, filesets are evaluated against the working
1966 1966 # directory.
1967 1967 matchargs = ['r:', 'd:relpath']
1968 1968 for p in pats:
1969 1969 matchargs.append('p:' + p)
1970 1970 for p in opts.get('include', []):
1971 1971 matchargs.append('i:' + p)
1972 1972 for p in opts.get('exclude', []):
1973 1973 matchargs.append('x:' + p)
1974 1974 matchargs = ','.join(('%r' % p) for p in matchargs)
1975 1975 opts['_matchfiles'] = matchargs
1976 1976 if follow:
1977 1977 opts[fnopats[0][followfirst]] = '.'
1978 1978 else:
1979 1979 if follow:
1980 1980 if pats:
1981 1981 # follow() revset interprets its file argument as a
1982 1982 # manifest entry, so use match.files(), not pats.
1983 1983 opts[fpats[followfirst]] = list(match.files())
1984 1984 else:
1985 1985 op = fnopats[followdescendants][followfirst]
1986 1986 opts[op] = 'rev(%d)' % startrev
1987 1987 else:
1988 1988 opts['_patslog'] = list(pats)
1989 1989
1990 1990 filematcher = None
1991 1991 if opts.get('patch') or opts.get('stat'):
1992 1992 # When following files, track renames via a special matcher.
1993 1993 # If we're forced to take the slowpath it means we're following
1994 1994 # at least one pattern/directory, so don't bother with rename tracking.
1995 1995 if follow and not match.always() and not slowpath:
1996 1996 # _makefollowlogfilematcher expects its files argument to be
1997 1997 # relative to the repo root, so use match.files(), not pats.
1998 1998 filematcher = _makefollowlogfilematcher(repo, match.files(),
1999 1999 followfirst)
2000 2000 else:
2001 2001 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2002 2002 if filematcher is None:
2003 2003 filematcher = lambda rev: match
2004 2004
2005 2005 expr = []
2006 2006 for op, val in sorted(opts.iteritems()):
2007 2007 if not val:
2008 2008 continue
2009 2009 if op not in opt2revset:
2010 2010 continue
2011 2011 revop, andor = opt2revset[op]
2012 2012 if '%(val)' not in revop:
2013 2013 expr.append(revop)
2014 2014 else:
2015 2015 if not isinstance(val, list):
2016 2016 e = revop % {'val': val}
2017 2017 else:
2018 2018 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2019 2019 expr.append(e)
2020 2020
2021 2021 if expr:
2022 2022 expr = '(' + ' and '.join(expr) + ')'
2023 2023 else:
2024 2024 expr = None
2025 2025 return expr, filematcher
2026 2026
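
For a feel of the opt2revset mapping above: an option dictionary is folded into one revset string, roughly as in this sketch (assuming a loaded `repo`; the exact quoting comes from the %r formatting):

    # roughly what 'hg log -k bug -u alice' contributes:
    expr, filematcher = _makelogrevset(repo, [],
                                       {'keyword': ['bug'], 'user': ['alice']},
                                       revset.spanset(repo))
    # expr is now something like "((keyword('bug')) and (user('alice')))";
    # filematcher is None here because neither --patch nor --stat was given
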
2027 2027 def _logrevs(repo, opts):
2028 2028 # Default --rev value depends on --follow but --follow behaviour
2029 2029 # depends on revisions resolved from --rev...
2030 2030 follow = opts.get('follow') or opts.get('follow_first')
2031 2031 if opts.get('rev'):
2032 2032 revs = scmutil.revrange(repo, opts['rev'])
2033 2033 elif follow and repo.dirstate.p1() == nullid:
2034 2034 revs = revset.baseset()
2035 2035 elif follow:
2036 2036 revs = repo.revs('reverse(:.)')
2037 2037 else:
2038 2038 revs = revset.spanset(repo)
2039 2039 revs.reverse()
2040 2040 return revs
2041 2041
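
In other words, the default revision range works out roughly as follows (a sketch, assuming a repository with a non-null working directory parent):

    allrevs = _logrevs(repo, {})                 # every revision, newest first
    followed = _logrevs(repo, {'follow': True})  # reverse(:.), ancestors of '.'
    explicit = _logrevs(repo, {'rev': ['2::5']}) # whatever --rev resolves to
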
2042 2042 def getgraphlogrevs(repo, pats, opts):
2043 2043 """Return (revs, expr, filematcher) where revs is an iterable of
2044 2044 revision numbers, expr is a revset string built from log options
2045 2045 and file patterns or None, and used to filter 'revs'. If --stat or
2046 2046 --patch are not passed filematcher is None. Otherwise it is a
2047 2047 callable taking a revision number and returning a match object
2048 2048 filtering the files to be detailed when displaying the revision.
2049 2049 """
2050 2050 limit = loglimit(opts)
2051 2051 revs = _logrevs(repo, opts)
2052 2052 if not revs:
2053 2053 return revset.baseset(), None, None
2054 2054 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2055 2055 if opts.get('rev'):
2056 2056 # User-specified revs might be unsorted, but don't sort before
2057 2057 # _makelogrevset because it might depend on the order of revs
2058 2058 revs.sort(reverse=True)
2059 2059 if expr:
2060 2060 # Revset matchers often operate faster on revisions in changelog
2061 2061 # order, because most filters deal with the changelog.
2062 2062 revs.reverse()
2063 2063 matcher = revset.match(repo.ui, expr)
2064 2064 # Revset matches can reorder revisions. "A or B" typically returns
2065 2065 # the revision matching A then the revision matching B. Sort
2066 2066 # again to fix that.
2067 2067 revs = matcher(repo, revs)
2068 2068 revs.sort(reverse=True)
2069 2069 if limit is not None:
2070 2070 limitedrevs = []
2071 2071 for idx, rev in enumerate(revs):
2072 2072 if idx >= limit:
2073 2073 break
2074 2074 limitedrevs.append(rev)
2075 2075 revs = revset.baseset(limitedrevs)
2076 2076
2077 2077 return revs, expr, filematcher
2078 2078
2079 2079 def getlogrevs(repo, pats, opts):
2080 2080 """Return (revs, expr, filematcher) where revs is an iterable of
2081 2081 revision numbers, expr is a revset string built from log options
2082 2082 and file patterns or None, and used to filter 'revs'. If --stat or
2083 2083 --patch are not passed filematcher is None. Otherwise it is a
2084 2084 callable taking a revision number and returning a match object
2085 2085 filtering the files to be detailed when displaying the revision.
2086 2086 """
2087 2087 limit = loglimit(opts)
2088 2088 revs = _logrevs(repo, opts)
2089 2089 if not revs:
2090 2090 return revset.baseset([]), None, None
2091 2091 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2092 2092 if expr:
2093 2093 # Revset matchers often operate faster on revisions in changelog
2094 2094 # order, because most filters deal with the changelog.
2095 2095 if not opts.get('rev'):
2096 2096 revs.reverse()
2097 2097 matcher = revset.match(repo.ui, expr)
2098 2098 # Revset matches can reorder revisions. "A or B" typically returns
2099 2099 # the revision matching A then the revision matching B. Sort
2100 2100 # again to fix that.
2101 2101 revs = matcher(repo, revs)
2102 2102 if not opts.get('rev'):
2103 2103 revs.sort(reverse=True)
2104 2104 if limit is not None:
2105 2105 count = 0
2106 2106 limitedrevs = []
2107 2107 it = iter(revs)
2108 2108 while count < limit:
2109 2109 try:
2110 2110 limitedrevs.append(it.next())
2111 2111 except (StopIteration):
2112 2112 break
2113 2113 count += 1
2114 2114 revs = revset.baseset(limitedrevs)
2115 2115
2116 2116 return revs, expr, filematcher
2117 2117
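
A typical caller (the log command itself) consumes the returned triple roughly like this (a sketch; `ui` and `repo` are assumed, and 'README' is assumed to be a tracked file):

    revs, expr, filematcher = getlogrevs(repo, ['README'], {'patch': True})
    displayer = show_changeset(ui, repo, {'patch': True}, buffered=False)
    for rev in revs:
        displayer.show(repo[rev], matchfn=filematcher(rev))
    displayer.close()
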
2118 2118 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
2119 2119 filematcher=None):
2120 2120 seen, state = [], graphmod.asciistate()
2121 2121 for rev, type, ctx, parents in dag:
2122 2122 char = 'o'
2123 2123 if ctx.node() in showparents:
2124 2124 char = '@'
2125 2125 elif ctx.obsolete():
2126 2126 char = 'x'
2127 2127 elif ctx.closesbranch():
2128 2128 char = '_'
2129 2129 copies = None
2130 2130 if getrenamed and ctx.rev():
2131 2131 copies = []
2132 2132 for fn in ctx.files():
2133 2133 rename = getrenamed(fn, ctx.rev())
2134 2134 if rename:
2135 2135 copies.append((fn, rename[0]))
2136 2136 revmatchfn = None
2137 2137 if filematcher is not None:
2138 2138 revmatchfn = filematcher(ctx.rev())
2139 2139 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2140 2140 lines = displayer.hunk.pop(rev).split('\n')
2141 2141 if not lines[-1]:
2142 2142 del lines[-1]
2143 2143 displayer.flush(rev)
2144 2144 edges = edgefn(type, char, lines, seen, rev, parents)
2145 2145 for type, char, lines, coldata in edges:
2146 2146 graphmod.ascii(ui, state, type, char, lines, coldata)
2147 2147 displayer.close()
2148 2148
2149 2149 def graphlog(ui, repo, *pats, **opts):
2150 2150 # Parameters are identical to log command ones
2151 2151 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2152 2152 revdag = graphmod.dagwalker(repo, revs)
2153 2153
2154 2154 getrenamed = None
2155 2155 if opts.get('copies'):
2156 2156 endrev = None
2157 2157 if opts.get('rev'):
2158 2158 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2159 2159 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2160 2160 displayer = show_changeset(ui, repo, opts, buffered=True)
2161 2161 showparents = [ctx.node() for ctx in repo[None].parents()]
2162 2162 displaygraph(ui, revdag, displayer, showparents,
2163 2163 graphmod.asciiedges, getrenamed, filematcher)
2164 2164
2165 2165 def checkunsupportedgraphflags(pats, opts):
2166 2166 for op in ["newest_first"]:
2167 2167 if op in opts and opts[op]:
2168 2168 raise util.Abort(_("-G/--graph option is incompatible with --%s")
2169 2169 % op.replace("_", "-"))
2170 2170
2171 2171 def graphrevs(repo, nodes, opts):
2172 2172 limit = loglimit(opts)
2173 2173 nodes.reverse()
2174 2174 if limit is not None:
2175 2175 nodes = nodes[:limit]
2176 2176 return graphmod.nodes(repo, nodes)
2177 2177
2178 2178 def add(ui, repo, match, prefix, explicitonly, **opts):
2179 2179 join = lambda f: os.path.join(prefix, f)
2180 2180 bad = []
2181 2181 oldbad = match.bad
2182 2182 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2183 2183 names = []
2184 2184 wctx = repo[None]
2185 2185 cca = None
2186 2186 abort, warn = scmutil.checkportabilityalert(ui)
2187 2187 if abort or warn:
2188 2188 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2189 2189 for f in wctx.walk(match):
2190 2190 exact = match.exact(f)
2191 2191 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2192 2192 if cca:
2193 2193 cca(f)
2194 2194 names.append(f)
2195 2195 if ui.verbose or not exact:
2196 2196 ui.status(_('adding %s\n') % match.rel(f))
2197 2197
2198 2198 for subpath in sorted(wctx.substate):
2199 2199 sub = wctx.sub(subpath)
2200 2200 try:
2201 2201 submatch = matchmod.narrowmatcher(subpath, match)
2202 2202 if opts.get('subrepos'):
2203 2203 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2204 2204 else:
2205 2205 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2206 2206 except error.LookupError:
2207 2207 ui.status(_("skipping missing subrepository: %s\n")
2208 2208 % join(subpath))
2209 2209
2210 2210 if not opts.get('dry_run'):
2211 2211 rejected = wctx.add(names, prefix)
2212 2212 bad.extend(f for f in rejected if f in match.files())
2213 2213 return bad
2214 2214
2215 2215 def forget(ui, repo, match, prefix, explicitonly):
2216 2216 join = lambda f: os.path.join(prefix, f)
2217 2217 bad = []
2218 2218 oldbad = match.bad
2219 2219 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2220 2220 wctx = repo[None]
2221 2221 forgot = []
2222 2222 s = repo.status(match=match, clean=True)
2223 2223 forget = sorted(s[0] + s[1] + s[3] + s[6])
2224 2224 if explicitonly:
2225 2225 forget = [f for f in forget if match.exact(f)]
2226 2226
2227 2227 for subpath in sorted(wctx.substate):
2228 2228 sub = wctx.sub(subpath)
2229 2229 try:
2230 2230 submatch = matchmod.narrowmatcher(subpath, match)
2231 2231 subbad, subforgot = sub.forget(submatch, prefix)
2232 2232 bad.extend([subpath + '/' + f for f in subbad])
2233 2233 forgot.extend([subpath + '/' + f for f in subforgot])
2234 2234 except error.LookupError:
2235 2235 ui.status(_("skipping missing subrepository: %s\n")
2236 2236 % join(subpath))
2237 2237
2238 2238 if not explicitonly:
2239 2239 for f in match.files():
2240 2240 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2241 2241 if f not in forgot:
2242 2242 if repo.wvfs.exists(f):
2243 2243 ui.warn(_('not removing %s: '
2244 2244 'file is already untracked\n')
2245 2245 % match.rel(f))
2246 2246 bad.append(f)
2247 2247
2248 2248 for f in forget:
2249 2249 if ui.verbose or not match.exact(f):
2250 2250 ui.status(_('removing %s\n') % match.rel(f))
2251 2251
2252 2252 rejected = wctx.forget(forget, prefix)
2253 2253 bad.extend(f for f in rejected if f in match.files())
2254 2254 forgot.extend(f for f in forget if f not in rejected)
2255 2255 return bad, forgot
2256 2256
2257 2257 def files(ui, ctx, m, fm, fmt, subrepos):
2258 2258 rev = ctx.rev()
2259 2259 ret = 1
2260 2260 ds = ctx.repo().dirstate
2261 2261
2262 2262 for f in ctx.matches(m):
2263 2263 if rev is None and ds[f] == 'r':
2264 2264 continue
2265 2265 fm.startitem()
2266 2266 if ui.verbose:
2267 2267 fc = ctx[f]
2268 2268 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2269 2269 fm.data(abspath=f)
2270 2270 fm.write('path', fmt, m.rel(f))
2271 2271 ret = 0
2272 2272
2273 2273 if subrepos:
2274 2274 for subpath in sorted(ctx.substate):
2275 2275 sub = ctx.sub(subpath)
2276 2276 try:
2277 2277 submatch = matchmod.narrowmatcher(subpath, m)
2278 2278 if sub.printfiles(ui, submatch, fm, fmt) == 0:
2279 2279 ret = 0
2280 2280 except error.LookupError:
2281 2281 ui.status(_("skipping missing subrepository: %s\n")
2282 2282 % m.abs(subpath))
2283 2283
2284 2284 return ret
2285 2285
2286 2286 def remove(ui, repo, m, prefix, after, force, subrepos):
2287 2287 join = lambda f: os.path.join(prefix, f)
2288 2288 ret = 0
2289 2289 s = repo.status(match=m, clean=True)
2290 2290 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2291 2291
2292 2292 wctx = repo[None]
2293 2293
2294 2294 for subpath in sorted(wctx.substate):
2295 2295 def matchessubrepo(matcher, subpath):
2296 2296 if matcher.exact(subpath):
2297 2297 return True
2298 2298 for f in matcher.files():
2299 2299 if f.startswith(subpath):
2300 2300 return True
2301 2301 return False
2302 2302
2303 2303 if subrepos or matchessubrepo(m, subpath):
2304 2304 sub = wctx.sub(subpath)
2305 2305 try:
2306 2306 submatch = matchmod.narrowmatcher(subpath, m)
2307 2307 if sub.removefiles(submatch, prefix, after, force, subrepos):
2308 2308 ret = 1
2309 2309 except error.LookupError:
2310 2310 ui.status(_("skipping missing subrepository: %s\n")
2311 2311 % join(subpath))
2312 2312
2313 2313 # warn about failure to delete explicit files/dirs
2314 2314 deleteddirs = scmutil.dirs(deleted)
2315 2315 for f in m.files():
2316 2316 def insubrepo():
2317 2317 for subpath in wctx.substate:
2318 2318 if f.startswith(subpath):
2319 2319 return True
2320 2320 return False
2321 2321
2322 2322 isdir = f in deleteddirs or f in wctx.dirs()
2323 2323 if f in repo.dirstate or isdir or f == '.' or insubrepo():
2324 2324 continue
2325 2325
2326 2326 if repo.wvfs.exists(f):
2327 2327 if repo.wvfs.isdir(f):
2328 2328 ui.warn(_('not removing %s: no tracked files\n')
2329 2329 % m.rel(f))
2330 2330 else:
2331 2331 ui.warn(_('not removing %s: file is untracked\n')
2332 2332 % m.rel(f))
2333 2333 # missing files will generate a warning elsewhere
2334 2334 ret = 1
2335 2335
2336 2336 if force:
2337 2337 list = modified + deleted + clean + added
2338 2338 elif after:
2339 2339 list = deleted
2340 2340 for f in modified + added + clean:
2341 2341 ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
2342 2342 ret = 1
2343 2343 else:
2344 2344 list = deleted + clean
2345 2345 for f in modified:
2346 2346 ui.warn(_('not removing %s: file is modified (use -f'
2347 2347 ' to force removal)\n') % m.rel(f))
2348 2348 ret = 1
2349 2349 for f in added:
2350 2350 ui.warn(_('not removing %s: file has been marked for add'
2351 2351 ' (use forget to undo)\n') % m.rel(f))
2352 2352 ret = 1
2353 2353
2354 2354 for f in sorted(list):
2355 2355 if ui.verbose or not m.exact(f):
2356 2356 ui.status(_('removing %s\n') % m.rel(f))
2357 2357
2358 2358 wlock = repo.wlock()
2359 2359 try:
2360 2360 if not after:
2361 2361 for f in list:
2362 2362 if f in added:
2363 2363 continue # we never unlink added files on remove
2364 2364 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2365 2365 repo[None].forget(list)
2366 2366 finally:
2367 2367 wlock.release()
2368 2368
2369 2369 return ret
2370 2370
2371 2371 def cat(ui, repo, ctx, matcher, prefix, **opts):
2372 2372 err = 1
2373 2373
2374 2374 def write(path):
2375 2375 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2376 2376 pathname=os.path.join(prefix, path))
2377 2377 data = ctx[path].data()
2378 2378 if opts.get('decode'):
2379 2379 data = repo.wwritedata(path, data)
2380 2380 fp.write(data)
2381 2381 fp.close()
2382 2382
2383 2383 # Automation often uses hg cat on single files, so special case it
2384 2384 # for performance to avoid the cost of parsing the manifest.
2385 2385 if len(matcher.files()) == 1 and not matcher.anypats():
2386 2386 file = matcher.files()[0]
2387 2387 mf = repo.manifest
2388 2388 mfnode = ctx._changeset[0]
2389 2389 if mf.find(mfnode, file)[0]:
2390 2390 write(file)
2391 2391 return 0
2392 2392
2393 2393 # Don't warn about "missing" files that are really in subrepos
2394 2394 bad = matcher.bad
2395 2395
2396 2396 def badfn(path, msg):
2397 2397 for subpath in ctx.substate:
2398 2398 if path.startswith(subpath):
2399 2399 return
2400 2400 bad(path, msg)
2401 2401
2402 2402 matcher.bad = badfn
2403 2403
2404 2404 for abs in ctx.walk(matcher):
2405 2405 write(abs)
2406 2406 err = 0
2407 2407
2408 2408 matcher.bad = bad
2409 2409
2410 2410 for subpath in sorted(ctx.substate):
2411 2411 sub = ctx.sub(subpath)
2412 2412 try:
2413 2413 submatch = matchmod.narrowmatcher(subpath, matcher)
2414 2414
2415 2415 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2416 2416 **opts):
2417 2417 err = 0
2418 2418 except error.RepoLookupError:
2419 2419 ui.status(_("skipping missing subrepository: %s\n")
2420 2420 % os.path.join(prefix, subpath))
2421 2421
2422 2422 return err
2423 2423
2424 2424 def commit(ui, repo, commitfunc, pats, opts):
2425 2425 '''commit the specified files or all outstanding changes'''
2426 2426 date = opts.get('date')
2427 2427 if date:
2428 2428 opts['date'] = util.parsedate(date)
2429 2429 message = logmessage(ui, opts)
2430 2430 matcher = scmutil.match(repo[None], pats, opts)
2431 2431
2432 2432 # extract addremove carefully -- this function can be called from a command
2433 2433 # that doesn't support addremove
2434 2434 if opts.get('addremove'):
2435 2435 if scmutil.addremove(repo, matcher, "", opts) != 0:
2436 2436 raise util.Abort(
2437 2437 _("failed to mark all new/missing files as added/removed"))
2438 2438
2439 2439 return commitfunc(ui, repo, message, matcher, opts)
2440 2440
2441 2441 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2442 2442 # amend will reuse the existing user if not specified, but the obsolete
2443 2443 # marker creation requires that the current user's name is specified.
2444 2444 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2445 2445 ui.username() # raise exception if username not set
2446 2446
2447 2447 ui.note(_('amending changeset %s\n') % old)
2448 2448 base = old.p1()
2449 2449
2450 2450 wlock = lock = newid = None
2451 2451 try:
2452 2452 wlock = repo.wlock()
2453 2453 lock = repo.lock()
2454 2454 tr = repo.transaction('amend')
2455 2455 try:
2456 2456 # See if we got a message from -m or -l, if not, open the editor
2457 2457 # with the message of the changeset to amend
2458 2458 message = logmessage(ui, opts)
2459 2459 # ensure logfile does not conflict with later enforcement of the
2460 2460 # message. potential logfile content has been processed by
2461 2461 # `logmessage` anyway.
2462 2462 opts.pop('logfile')
2463 2463 # First, do a regular commit to record all changes in the working
2464 2464 # directory (if there are any)
2465 2465 ui.callhooks = False
2466 2466 currentbookmark = repo._bookmarkcurrent
2467 2467 try:
2468 2468 repo._bookmarkcurrent = None
2469 2469 opts['message'] = 'temporary amend commit for %s' % old
2470 2470 node = commit(ui, repo, commitfunc, pats, opts)
2471 2471 finally:
2472 2472 repo._bookmarkcurrent = currentbookmark
2473 2473 ui.callhooks = True
2474 2474 ctx = repo[node]
2475 2475
2476 2476 # Participating changesets:
2477 2477 #
2478 2478 # node/ctx o - new (intermediate) commit that contains changes
2479 2479 # | from working dir to go into amending commit
2480 2480 # | (or a workingctx if there were no changes)
2481 2481 # |
2482 2482 # old o - changeset to amend
2483 2483 # |
2484 2484 # base o - parent of amending changeset
2485 2485
2486 2486 # Update extra dict from amended commit (e.g. to preserve graft
2487 2487 # source)
2488 2488 extra.update(old.extra())
2489 2489
2490 2490 # Also update it from the intermediate commit or from the wctx
2491 2491 extra.update(ctx.extra())
2492 2492
2493 2493 if len(old.parents()) > 1:
2494 2494 # ctx.files() isn't reliable for merges, so fall back to the
2495 2495 # slower repo.status() method
2496 2496 files = set([fn for st in repo.status(base, old)[:3]
2497 2497 for fn in st])
2498 2498 else:
2499 2499 files = set(old.files())
2500 2500
2501 2501 # Second, we use either the commit we just did or, if there were no
2502 2502 # changes, the parent of the working directory as the version of the
2503 2503 # files in the final amend commit
2504 2504 if node:
2505 2505 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2506 2506
2507 2507 user = ctx.user()
2508 2508 date = ctx.date()
2509 2509 # Recompute copies (avoid recording a -> b -> a)
2510 2510 copied = copies.pathcopies(base, ctx)
2511 2511 if old.p2().node() != nullid:
2512 2512 copied.update(copies.pathcopies(old.p2(), ctx))
2513 2513
2514 2514 # Prune files which were reverted by the updates: if old
2515 2515 # introduced file X and our intermediate commit, node,
2516 2516 # renamed that file, then those two files are the same and
2517 2517 # we can discard X from our list of files. Likewise if X
2518 2518 # was deleted, it's no longer relevant
2519 2519 files.update(ctx.files())
2520 2520
2521 2521 def samefile(f):
2522 2522 if f in ctx.manifest():
2523 2523 a = ctx.filectx(f)
2524 2524 if f in base.manifest():
2525 2525 b = base.filectx(f)
2526 2526 return (not a.cmp(b)
2527 2527 and a.flags() == b.flags())
2528 2528 else:
2529 2529 return False
2530 2530 else:
2531 2531 return f not in base.manifest()
2532 2532 files = [f for f in files if not samefile(f)]
2533 2533
2534 2534 def filectxfn(repo, ctx_, path):
2535 2535 try:
2536 2536 fctx = ctx[path]
2537 2537 flags = fctx.flags()
2538 2538 mctx = context.memfilectx(repo,
2539 2539 fctx.path(), fctx.data(),
2540 2540 islink='l' in flags,
2541 2541 isexec='x' in flags,
2542 2542 copied=copied.get(path))
2543 2543 return mctx
2544 2544 except KeyError:
2545 2545 return None
2546 2546 else:
2547 2547 ui.note(_('copying changeset %s to %s\n') % (old, base))
2548 2548
2549 2549 # Use version of files as in the old cset
2550 2550 def filectxfn(repo, ctx_, path):
2551 2551 try:
2552 2552 return old.filectx(path)
2553 2553 except KeyError:
2554 2554 return None
2555 2555
2556 2556 user = opts.get('user') or old.user()
2557 2557 date = opts.get('date') or old.date()
2558 2558 editform = mergeeditform(old, 'commit.amend')
2559 2559 editor = getcommiteditor(editform=editform, **opts)
2560 2560 if not message:
2561 2561 editor = getcommiteditor(edit=True, editform=editform)
2562 2562 message = old.description()
2563 2563
2564 2564 pureextra = extra.copy()
2565 2565 extra['amend_source'] = old.hex()
2566 2566
2567 2567 new = context.memctx(repo,
2568 2568 parents=[base.node(), old.p2().node()],
2569 2569 text=message,
2570 2570 files=files,
2571 2571 filectxfn=filectxfn,
2572 2572 user=user,
2573 2573 date=date,
2574 2574 extra=extra,
2575 2575 editor=editor)
2576 2576
2577 2577 newdesc = changelog.stripdesc(new.description())
2578 2578 if ((not node)
2579 2579 and newdesc == old.description()
2580 2580 and user == old.user()
2581 2581 and date == old.date()
2582 2582 and pureextra == old.extra()):
2583 2583 # nothing changed. continuing here would create a new node
2584 2584 # anyway because of the amend_source noise.
2585 2585 #
2586 2586 # This is not what we expect from amend.
2587 2587 return old.node()
2588 2588
2589 2589 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2590 2590 try:
2591 2591 if opts.get('secret'):
2592 2592 commitphase = 'secret'
2593 2593 else:
2594 2594 commitphase = old.phase()
2595 2595 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2596 2596 newid = repo.commitctx(new)
2597 2597 finally:
2598 2598 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2599 2599 if newid != old.node():
2600 2600 # Reroute the working copy parent to the new changeset
2601 2601 repo.setparents(newid, nullid)
2602 2602
2603 2603 # Move bookmarks from old parent to amend commit
2604 2604 bms = repo.nodebookmarks(old.node())
2605 2605 if bms:
2606 2606 marks = repo._bookmarks
2607 2607 for bm in bms:
2608 2608 marks[bm] = newid
2609 2609 marks.write()
2610 2610 # commit the whole amend process
2611 2611 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2612 2612 if createmarkers and newid != old.node():
2613 2613 # mark the new changeset as successor of the rewritten one
2614 2614 new = repo[newid]
2615 2615 obs = [(old, (new,))]
2616 2616 if node:
2617 2617 obs.append((ctx, ()))
2618 2618
2619 2619 obsolete.createmarkers(repo, obs)
2620 2620 tr.close()
2621 2621 finally:
2622 2622 tr.release()
2623 2623 if not createmarkers and newid != old.node():
2624 2624 # Strip the intermediate commit (if there was one) and the amended
2625 2625 # commit
2626 2626 if node:
2627 2627 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2628 2628 ui.note(_('stripping amended changeset %s\n') % old)
2629 2629 repair.strip(ui, repo, old.node(), topic='amend-backup')
2630 2630 finally:
2631 2631 if newid is None:
2632 2632 repo.dirstate.invalidate()
2633 2633 lockmod.release(lock, wlock)
2634 2634 return newid
2635 2635
2636 2636 def commiteditor(repo, ctx, subs, editform=''):
2637 2637 if ctx.description():
2638 2638 return ctx.description()
2639 2639 return commitforceeditor(repo, ctx, subs, editform=editform)
2640 2640
2641 2641 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2642 2642 editform=''):
2643 2643 if not extramsg:
2644 2644 extramsg = _("Leave message empty to abort commit.")
2645 2645
2646 2646 forms = [e for e in editform.split('.') if e]
2647 2647 forms.insert(0, 'changeset')
2648 2648 while forms:
2649 2649 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2650 2650 if tmpl:
2651 2651 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2652 2652 break
2653 2653 forms.pop()
2654 2654 else:
2655 2655 committext = buildcommittext(repo, ctx, subs, extramsg)
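    # Illustration (editor's note, not in the original source): with editform
    # 'commit.amend' the loop above looks up the [committemplate] config keys
    # 'changeset.commit.amend', 'changeset.commit' and finally 'changeset',
    # and only falls back to buildcommittext() when none of them is set.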
2656 2656
2657 2657 # run editor in the repository root
2658 2658 olddir = os.getcwd()
2659 2659 os.chdir(repo.root)
2660 2660 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2661 2661 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2662 2662 os.chdir(olddir)
2663 2663
2664 2664 if finishdesc:
2665 2665 text = finishdesc(text)
2666 2666 if not text.strip():
2667 2667 raise util.Abort(_("empty commit message"))
2668 2668
2669 2669 return text
2670 2670
2671 2671 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2672 2672 ui = repo.ui
2673 2673 tmpl, mapfile = gettemplate(ui, tmpl, None)
2674 2674
2675 2675 try:
2676 2676 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2677 2677 except SyntaxError, inst:
2678 2678 raise util.Abort(inst.args[0])
2679 2679
2680 2680 for k, v in repo.ui.configitems('committemplate'):
2681 2681 if k != 'changeset':
2682 2682 t.t.cache[k] = v
2683 2683
2684 2684 if not extramsg:
2685 2685 extramsg = '' # ensure that extramsg is a string
2686 2686
2687 2687 ui.pushbuffer()
2688 2688 t.show(ctx, extramsg=extramsg)
2689 2689 return ui.popbuffer()
2690 2690
2691 2691 def buildcommittext(repo, ctx, subs, extramsg):
2692 2692 edittext = []
2693 2693 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2694 2694 if ctx.description():
2695 2695 edittext.append(ctx.description())
2696 2696 edittext.append("")
2697 2697 edittext.append("") # Empty line between message and comments.
2698 2698 edittext.append(_("HG: Enter commit message."
2699 2699 " Lines beginning with 'HG:' are removed."))
2700 2700 edittext.append("HG: %s" % extramsg)
2701 2701 edittext.append("HG: --")
2702 2702 edittext.append(_("HG: user: %s") % ctx.user())
2703 2703 if ctx.p2():
2704 2704 edittext.append(_("HG: branch merge"))
2705 2705 if ctx.branch():
2706 2706 edittext.append(_("HG: branch '%s'") % ctx.branch())
2707 2707 if bookmarks.iscurrent(repo):
2708 2708 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
2709 2709 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2710 2710 edittext.extend([_("HG: added %s") % f for f in added])
2711 2711 edittext.extend([_("HG: changed %s") % f for f in modified])
2712 2712 edittext.extend([_("HG: removed %s") % f for f in removed])
2713 2713 if not added and not modified and not removed:
2714 2714 edittext.append(_("HG: no files changed"))
2715 2715 edittext.append("")
2716 2716
2717 2717 return "\n".join(edittext)
2718 2718
2719 2719 def commitstatus(repo, node, branch, bheads=None, opts={}):
2720 2720 ctx = repo[node]
2721 2721 parents = ctx.parents()
2722 2722
2723 2723 if (not opts.get('amend') and bheads and node not in bheads and not
2724 2724 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2725 2725 repo.ui.status(_('created new head\n'))
2726 2726 # The message is not printed for initial roots. For the other
2727 2727 # changesets, it is printed in the following situations:
2728 2728 #
2729 2729 # Par column: for the 2 parents with ...
2730 2730 # N: null or no parent
2731 2731 # B: parent is on another named branch
2732 2732 # C: parent is a regular non head changeset
2733 2733 # H: parent was a branch head of the current branch
2734 2734 # Msg column: whether we print "created new head" message
2735 2735 # In the following, it is assumed that there already exist some
2736 2736 # initial branch heads of the current branch, otherwise nothing is
2737 2737 # printed anyway.
2738 2738 #
2739 2739 # Par Msg Comment
2740 2740 # N N y additional topo root
2741 2741 #
2742 2742 # B N y additional branch root
2743 2743 # C N y additional topo head
2744 2744 # H N n usual case
2745 2745 #
2746 2746 # B B y weird additional branch root
2747 2747 # C B y branch merge
2748 2748 # H B n merge with named branch
2749 2749 #
2750 2750 # C C y additional head from merge
2751 2751 # C H n merge with a head
2752 2752 #
2753 2753 # H H n head merge: head count decreases
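    # Worked example (editor's note): committing on top of a non-head
    # changeset while heads already exist on the branch is the 'C N' row,
    # so "created new head" is printed; committing on top of the current
    # branch head is the 'H N' row, so nothing is printed.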
2754 2754
2755 2755 if not opts.get('close_branch'):
2756 2756 for r in parents:
2757 2757 if r.closesbranch() and r.branch() == branch:
2758 2758 repo.ui.status(_('reopening closed branch head %d\n') % r)
2759 2759
2760 2760 if repo.ui.debugflag:
2761 2761 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2762 2762 elif repo.ui.verbose:
2763 2763 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2764 2764
2765 2765 def revert(ui, repo, ctx, parents, *pats, **opts):
2766 2766 parent, p2 = parents
2767 2767 node = ctx.node()
2768 2768
2769 2769 mf = ctx.manifest()
2770 2770 if node == p2:
2771 2771 parent = p2
2772 2772 if node == parent:
2773 2773 pmf = mf
2774 2774 else:
2775 2775 pmf = None
2776 2776
2777 2777 # need all matching names in dirstate and manifest of target rev,
2778 2778 # so have to walk both. do not print errors if files exist in one
2779 2779 # but not the other. in both cases, filesets should be evaluated against
2780 2780 # workingctx to get consistent result (issue4497). this means 'set:**'
2781 2781 # cannot be used to select missing files from target rev.
2782 2782
2783 2783 # `names` is a mapping for all elements in working copy and target revision
2784 2784 # The mapping is in the form:
2785 2785 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2786 2786 names = {}
2787 2787
2788 2788 wlock = repo.wlock()
2789 2789 try:
2790 2790 ## filling of the `names` mapping
2791 2791 # walk dirstate to fill `names`
2792 2792
2793 2793 interactive = opts.get('interactive', False)
2794 2794 wctx = repo[None]
2795 2795 m = scmutil.match(wctx, pats, opts)
2796 2796
2797 2797 # we'll need this later
2798 2798 targetsubs = sorted(s for s in wctx.substate if m(s))
2799 2799
2800 2800 if not m.always():
2801 2801 m.bad = lambda x, y: False
2802 2802 for abs in repo.walk(m):
2803 2803 names[abs] = m.rel(abs), m.exact(abs)
2804 2804
2805 2805 # walk target manifest to fill `names`
2806 2806
2807 2807 def badfn(path, msg):
2808 2808 if path in names:
2809 2809 return
2810 2810 if path in ctx.substate:
2811 2811 return
2812 2812 path_ = path + '/'
2813 2813 for f in names:
2814 2814 if f.startswith(path_):
2815 2815 return
2816 2816 ui.warn("%s: %s\n" % (m.rel(path), msg))
2817 2817
2818 2818 m.bad = badfn
2819 2819 for abs in ctx.walk(m):
2820 2820 if abs not in names:
2821 2821 names[abs] = m.rel(abs), m.exact(abs)
2822 2822
2823 2823 # Find status of all file in `names`.
2824 2824 m = scmutil.matchfiles(repo, names)
2825 2825
2826 2826 changes = repo.status(node1=node, match=m,
2827 2827 unknown=True, ignored=True, clean=True)
2828 2828 else:
2829 2829 changes = repo.status(node1=node, match=m)
2830 2830 for kind in changes:
2831 2831 for abs in kind:
2832 2832 names[abs] = m.rel(abs), m.exact(abs)
2833 2833
2834 2834 m = scmutil.matchfiles(repo, names)
2835 2835
2836 2836 modified = set(changes.modified)
2837 2837 added = set(changes.added)
2838 2838 removed = set(changes.removed)
2839 2839 _deleted = set(changes.deleted)
2840 2840 unknown = set(changes.unknown)
2841 2841 unknown.update(changes.ignored)
2842 2842 clean = set(changes.clean)
2843 2843 modadded = set()
2844 2844
2845 2845 # split between files known in target manifest and the others
2846 2846 smf = set(mf)
2847 2847
2848 2848 # determine the exact nature of the deleted files
2849 2849 deladded = _deleted - smf
2850 2850 deleted = _deleted - deladded
2851 2851
2852 2852 # We need to account for the state of the file in the dirstate,
2853 2853 # even when we revert against something else than parent. This will
2854 2854 # slightly alter the behavior of revert (doing back up or not, delete
2855 2855 # or just forget etc).
2856 2856 if parent == node:
2857 2857 dsmodified = modified
2858 2858 dsadded = added
2859 2859 dsremoved = removed
2860 2860 # store all local modifications, useful later for rename detection
2861 2861 localchanges = dsmodified | dsadded
2862 2862 modified, added, removed = set(), set(), set()
2863 2863 else:
2864 2864 changes = repo.status(node1=parent, match=m)
2865 2865 dsmodified = set(changes.modified)
2866 2866 dsadded = set(changes.added)
2867 2867 dsremoved = set(changes.removed)
2868 2868 # store all local modifications, useful later for rename detection
2869 2869 localchanges = dsmodified | dsadded
2870 2870
2871 2871 # only take into account for removes between wc and target
2872 2872 clean |= dsremoved - removed
2873 2873 dsremoved &= removed
2874 2874 # distinguish between dirstate remove and other
2875 2875 removed -= dsremoved
2876 2876
2877 2877 modadded = added & dsmodified
2878 2878 added -= modadded
2879 2879
2880 2880 # tell newly modified files apart.
2881 2881 dsmodified &= modified
2882 2882 dsmodified |= modified & dsadded # dirstate added may need backup
2883 2883 modified -= dsmodified
2884 2884
2885 2885 # We need to wait for some post-processing to update this set
2886 2886 # before making the distinction. The dirstate will be used for
2887 2887 # that purpose.
2888 2888 dsadded = added
2889 2889
2890 2890 # in case of merge, files that are actually added can be reported as
2891 2891 # modified, we need to post process the result
2892 2892 if p2 != nullid:
2893 2893 if pmf is None:
2894 2894 # only need parent manifest in the merge case,
2895 2895 # so do not read by default
2896 2896 pmf = repo[parent].manifest()
2897 2897 mergeadd = dsmodified - set(pmf)
2898 2898 dsadded |= mergeadd
2899 2899 dsmodified -= mergeadd
2900 2900
2901 2901 # if f is a rename, update `names` to also revert the source
2902 2902 cwd = repo.getcwd()
2903 2903 for f in localchanges:
2904 2904 src = repo.dirstate.copied(f)
2905 2905 # XXX should we check for rename down to target node?
2906 2906 if src and src not in names and repo.dirstate[src] == 'r':
2907 2907 dsremoved.add(src)
2908 2908 names[src] = (repo.pathto(src, cwd), True)
2909 2909
2910 2910 # distinguish between files to forget and the others
2911 2911 added = set()
2912 2912 for abs in dsadded:
2913 2913 if repo.dirstate[abs] != 'a':
2914 2914 added.add(abs)
2915 2915 dsadded -= added
2916 2916
2917 2917 for abs in deladded:
2918 2918 if repo.dirstate[abs] == 'a':
2919 2919 dsadded.add(abs)
2920 2920 deladded -= dsadded
2921 2921
2922 2922 # For files marked as removed, we check if an unknown file is present at
2923 2923 # the same path. If such a file exists it may need to be backed up.
2924 2924 # Making the distinction at this stage helps have simpler backup
2925 2925 # logic.
2926 2926 removunk = set()
2927 2927 for abs in removed:
2928 2928 target = repo.wjoin(abs)
2929 2929 if os.path.lexists(target):
2930 2930 removunk.add(abs)
2931 2931 removed -= removunk
2932 2932
2933 2933 dsremovunk = set()
2934 2934 for abs in dsremoved:
2935 2935 target = repo.wjoin(abs)
2936 2936 if os.path.lexists(target):
2937 2937 dsremovunk.add(abs)
2938 2938 dsremoved -= dsremovunk
2939 2939
2940 2940 # action to be actually performed by revert
2941 2941 # (<list of file>, message>) tuple
2942 2942 actions = {'revert': ([], _('reverting %s\n')),
2943 2943 'add': ([], _('adding %s\n')),
2944 2944 'remove': ([], _('removing %s\n')),
2945 2945 'drop': ([], _('removing %s\n')),
2946 2946 'forget': ([], _('forgetting %s\n')),
2947 2947 'undelete': ([], _('undeleting %s\n')),
2948 2948 'noop': (None, _('no changes needed to %s\n')),
2949 2949 'unknown': (None, _('file not managed: %s\n')),
2950 2950 }
2951 2951
2952 2952 # "constants" that convey the backup strategy.
2953 2953 # All are set to `discard` if `no-backup` is set, to avoid checking
2954 2954 # no_backup lower in the code.
2955 2955 # These values are ordered for comparison purposes
2956 2956 backup = 2 # unconditionally do backup
2957 2957 check = 1 # check if the existing file differs from target
2958 2958 discard = 0 # never do backup
2959 2959 if opts.get('no_backup'):
2960 2960 backup = check = discard
2961 2961
2962 2962 backupanddel = actions['remove']
2963 2963 if not opts.get('no_backup'):
2964 2964 backupanddel = actions['drop']
2965 2965
2966 2966 disptable = (
2967 2967 # dispatch table:
2968 2968 # file state
2969 2969 # action
2970 2970 # make backup
2971 2971
2972 2972 ## Sets that result in changes to files on disk
2973 2973 # Modified compared to target, no local change
2974 2974 (modified, actions['revert'], discard),
2975 2975 # Modified compared to target, but local file is deleted
2976 2976 (deleted, actions['revert'], discard),
2977 2977 # Modified compared to target, local change
2978 2978 (dsmodified, actions['revert'], backup),
2979 2979 # Added since target
2980 2980 (added, actions['remove'], discard),
2981 2981 # Added in working directory
2982 2982 (dsadded, actions['forget'], discard),
2983 2983 # Added since target, have local modification
2984 2984 (modadded, backupanddel, backup),
2985 2985 # Added since target but file is missing in working directory
2986 2986 (deladded, actions['drop'], discard),
2987 2987 # Removed since target, before working copy parent
2988 2988 (removed, actions['add'], discard),
2989 2989 # Same as `removed` but an unknown file exists at the same path
2990 2990 (removunk, actions['add'], check),
2991 2991 # Removed since target, marked as such in working copy parent
2992 2992 (dsremoved, actions['undelete'], discard),
2993 2993 # Same as `dsremoved` but an unknown file exists at the same path
2994 2994 (dsremovunk, actions['undelete'], check),
2995 2995 ## the following sets do not result in any file changes
2996 2996 # File with no modification
2997 2997 (clean, actions['noop'], discard),
2998 2998 # Existing file, not tracked anywhere
2999 2999 (unknown, actions['unknown'], discard),
3000 3000 )
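        # (editor's note) for example, a file that differs from the target
        # revision and also has local modifications lands in 'dsmodified',
        # so it is reverted and the previous content is kept as a .orig
        # backup unless --no-backup was given.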
3001 3001
3002 3002 for abs, (rel, exact) in sorted(names.items()):
3003 3003 # target file to be touched on disk (relative to cwd)
3004 3004 target = repo.wjoin(abs)
3005 3005 # search the entry in the dispatch table.
3006 3006 # if the file is in any of these sets, it was touched in the working
3007 3007 # directory parent and we are sure it needs to be reverted.
3008 3008 for table, (xlist, msg), dobackup in disptable:
3009 3009 if abs not in table:
3010 3010 continue
3011 3011 if xlist is not None:
3012 3012 xlist.append(abs)
3013 3013 if dobackup and (backup <= dobackup
3014 3014 or wctx[abs].cmp(ctx[abs])):
3015 3015 bakname = "%s.orig" % rel
3016 3016 ui.note(_('saving current version of %s as %s\n') %
3017 3017 (rel, bakname))
3018 3018 if not opts.get('dry_run'):
3019 3019 if interactive:
3020 3020 util.copyfile(target, bakname)
3021 3021 else:
3022 3022 util.rename(target, bakname)
3023 3023 if ui.verbose or not exact:
3024 3024 if not isinstance(msg, basestring):
3025 3025 msg = msg(abs)
3026 3026 ui.status(msg % rel)
3027 3027 elif exact:
3028 3028 ui.warn(msg % rel)
3029 3029 break
3030 3030
3031 3031 if not opts.get('dry_run'):
3032 3032 needdata = ('revert', 'add', 'undelete')
3033 3033 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3034 3034 _performrevert(repo, parents, ctx, actions, interactive)
3035 3035
3036 3036 if targetsubs:
3037 3037 # Revert the subrepos on the revert list
3038 3038 for sub in targetsubs:
3039 3039 try:
3040 3040 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3041 3041 except KeyError:
3042 3042 raise util.Abort("subrepository '%s' does not exist in %s!"
3043 3043 % (sub, short(ctx.node())))
3044 3044 finally:
3045 3045 wlock.release()
3046 3046
3047 3047 def _revertprefetch(repo, ctx, *files):
3048 3048 """Let extension changing the storage layer prefetch content"""
3049 3049 pass
3050 3050
3051 3051 def _performrevert(repo, parents, ctx, actions, interactive=False):
3052 3052 """function that actually performs all the actions computed for revert
3053 3053
3054 3054 This is an independent function to let extensions plug in and react to
3055 3055 the imminent revert.
3056 3056
3057 3057 Make sure you have the working directory locked when calling this function.
3058 3058 """
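    # (editor's note) 'actions' is the mapping built by revert() above, of the
    # shape {'revert': ([files], msg), 'add': ([files], msg), ...}; 'noop' and
    # 'unknown' carry None instead of a list, which is why the code below only
    # ever indexes actions[name][0] for the actionable categories.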
3059 3059 parent, p2 = parents
3060 3060 node = ctx.node()
3061 3061 def checkout(f):
3062 3062 fc = ctx[f]
3063 3063 repo.wwrite(f, fc.data(), fc.flags())
3064 3064
3065 3065 audit_path = pathutil.pathauditor(repo.root)
3066 3066 for f in actions['forget'][0]:
3067 3067 repo.dirstate.drop(f)
3068 3068 for f in actions['remove'][0]:
3069 3069 audit_path(f)
3070 3070 util.unlinkpath(repo.wjoin(f))
3071 3071 repo.dirstate.remove(f)
3072 3072 for f in actions['drop'][0]:
3073 3073 audit_path(f)
3074 3074 repo.dirstate.remove(f)
3075 3075
3076 3076 normal = None
3077 3077 if node == parent:
3078 3078 # We're reverting to our parent. If possible, we'd like status
3079 3079 # to report the file as clean. We have to use normallookup for
3080 3080 # merges to avoid losing information about merged/dirty files.
3081 3081 if p2 != nullid:
3082 3082 normal = repo.dirstate.normallookup
3083 3083 else:
3084 3084 normal = repo.dirstate.normal
3085 3085
3086 3086 if interactive:
3087 3087 # Prompt the user for changes to revert
3088 3088 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3089 3089 m = scmutil.match(ctx, torevert, {})
3090 3090 diff = patch.diff(repo, None, ctx.node(), m)
3091 3091 originalchunks = patch.parsepatch(diff)
3092 3092 try:
3093 3093 chunks = recordfilter(repo.ui, originalchunks)
3094 3094 except patch.PatchError, err:
3095 3095 raise util.Abort(_('error parsing patch: %s') % err)
3096 3096
3097 3097 # Apply changes
3098 3098 fp = cStringIO.StringIO()
3099 3099 for c in chunks:
3100 3100 c.write(fp)
3101 3101 dopatch = fp.tell()
3102 3102 fp.seek(0)
3103 3103 if dopatch:
3104 3104 try:
3105 3105 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3106 3106 except patch.PatchError, err:
3107 3107 raise util.Abort(str(err))
3108 3108 del fp
3109 3109
3110 3110 for f in actions['revert'][0]:
3111 3111 if normal:
3112 3112 normal(f)
3113 3113
3114 3114 else:
3115 3115 for f in actions['revert'][0]:
3116 3116 checkout(f)
3117 3117 if normal:
3118 3118 normal(f)
3119 3119
3120 3120 for f in actions['add'][0]:
3121 3121 checkout(f)
3122 3122 repo.dirstate.add(f)
3123 3123
3124 3124 normal = repo.dirstate.normallookup
3125 3125 if node == parent and p2 == nullid:
3126 3126 normal = repo.dirstate.normal
3127 3127 for f in actions['undelete'][0]:
3128 3128 checkout(f)
3129 3129 normal(f)
3130 3130
3131 3131 copied = copies.pathcopies(repo[parent], ctx)
3132 3132
3133 3133 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3134 3134 if f in copied:
3135 3135 repo.dirstate.copy(copied[f], f)
3136 3136
3137 3137 def command(table):
3138 3138 """Returns a function object to be used as a decorator for making commands.
3139 3139
3140 3140 This function receives a command table as its argument. The table should
3141 3141 be a dict.
3142 3142
3143 3143 The returned function can be used as a decorator for adding commands
3144 3144 to that command table. This function accepts multiple arguments to define
3145 3145 a command.
3146 3146
3147 3147 The first argument is the command name.
3148 3148
3149 3149 The options argument is an iterable of tuples defining command arguments.
3150 3150 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3151 3151
3152 3152 The synopsis argument defines a short, one line summary of how to use the
3153 3153 command. This shows up in the help output.
3154 3154
3155 3155 The norepo argument defines whether the command does not require a
3156 3156 local repository. Most commands operate against a repository, thus the
3157 3157 default is False.
3158 3158
3159 3159 The optionalrepo argument defines whether the command optionally requires
3160 3160 a local repository.
3161 3161
3162 3162 The inferrepo argument defines whether to try to find a repository from the
3163 3163 command line arguments. If True, arguments will be examined for potential
3164 3164 repository locations. See ``findrepo()``. If a repository is found, it
3165 3165 will be used.
3166 3166 """
3167 3167 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3168 3168 inferrepo=False):
3169 3169 def decorator(func):
3170 3170 if synopsis:
3171 3171 table[name] = func, list(options), synopsis
3172 3172 else:
3173 3173 table[name] = func, list(options)
3174 3174
3175 3175 if norepo:
3176 3176 # Avoid import cycle.
3177 3177 import commands
3178 3178 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3179 3179
3180 3180 if optionalrepo:
3181 3181 import commands
3182 3182 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3183 3183
3184 3184 if inferrepo:
3185 3185 import commands
3186 3186 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3187 3187
3188 3188 return func
3189 3189 return decorator
3190 3190
3191 3191 return cmd
3192 3192
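
A minimal extension-style sketch of the decorator in use (the command, its option and its output are invented for illustration):

    from mercurial import cmdutil
    from mercurial.i18n import _

    cmdtable = {}
    command = cmdutil.command(cmdtable)

    @command('hello',
             [('g', 'greeting', 'hello', _('text to print'))],
             _('hg hello [-g TEXT]'),
             norepo=True)
    def hello(ui, *args, **opts):
        """print a greeting (illustrative only)"""
        ui.write('%s\n' % opts['greeting'])
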
3193 3193 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3194 3194 # commands.outgoing. "missing" is "missing" of the result of
3195 3195 # "findcommonoutgoing()"
3196 3196 outgoinghooks = util.hooks()
3197 3197
3198 3198 # a list of (ui, repo) functions called by commands.summary
3199 3199 summaryhooks = util.hooks()
3200 3200
3201 3201 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3202 3202 #
3203 3203 # functions should return tuple of booleans below, if 'changes' is None:
3204 3204 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3205 3205 #
3206 3206 # otherwise, 'changes' is a tuple of tuples below:
3207 3207 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3208 3208 # - (desturl, destbranch, destpeer, outgoing)
3209 3209 summaryremotehooks = util.hooks()
3210 3210
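
Extensions contribute to these hook points by registering callables; a hypothetical summary hook:

    from mercurial import cmdutil
    from mercurial.i18n import _

    def _extsummary(ui, repo):
        # add one extra line to 'hg summary' output
        ui.write(_('myext: %d heads\n') % len(repo.heads()))

    cmdutil.summaryhooks.add('myext', _extsummary)
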
3211 3211 # A list of state files kept by multistep operations like graft.
3212 3212 # Since graft cannot be aborted, it is considered 'clearable' by update.
3213 3213 # note: bisect is intentionally excluded
3214 3214 # (state file, clearable, allowcommit, error, hint)
3215 3215 unfinishedstates = [
3216 3216 ('graftstate', True, False, _('graft in progress'),
3217 3217 _("use 'hg graft --continue' or 'hg update' to abort")),
3218 3218 ('updatestate', True, False, _('last update was interrupted'),
3219 3219 _("use 'hg update' to get a consistent checkout"))
3220 3220 ]
3221 3221
3222 3222 def checkunfinished(repo, commit=False):
3223 3223 '''Look for an unfinished multistep operation, like graft, and abort
3224 3224 if found. It's probably good to check this right before
3225 3225 bailifchanged().
3226 3226 '''
3227 3227 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3228 3228 if commit and allowcommit:
3229 3229 continue
3230 3230 if repo.vfs.exists(f):
3231 3231 raise util.Abort(msg, hint=hint)
3232 3232
3233 3233 def clearunfinished(repo):
3234 3234 '''Check for unfinished operations (as above), and clear the ones
3235 3235 that are clearable.
3236 3236 '''
3237 3237 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3238 3238 if not clearable and repo.vfs.exists(f):
3239 3239 raise util.Abort(msg, hint=hint)
3240 3240 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3241 3241 if clearable and repo.vfs.exists(f):
3242 3242 util.unlink(repo.join(f))
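
Extensions that add their own multistep operations append an entry to unfinishedstates, typically from uisetup(); a hypothetical registration (the state file name, flags and messages are invented):

    from mercurial import cmdutil
    from mercurial.i18n import _

    cmdutil.unfinishedstates.append(
        ('myopstate', True, False, _('my operation in progress'),
         _("use 'hg myop --continue' or 'hg myop --abort'")))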