##// END OF EJS Templates
commit: use `dirstate.change_files` to scope the associated `addremove`...
marmoute -
r50924:28dfb2df default
parent child Browse files
Show More
@@ -1,1890 +1,1903 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import contextlib
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 dirstate,
26 26 error,
27 27 exchange,
28 28 extensions,
29 29 exthelper,
30 30 filemerge,
31 31 hg,
32 32 logcmdutil,
33 33 match as matchmod,
34 34 merge,
35 35 mergestate as mergestatemod,
36 36 pathutil,
37 37 pycompat,
38 38 scmutil,
39 39 smartset,
40 40 subrepo,
41 41 url as urlmod,
42 42 util,
43 43 )
44 44
45 45 from mercurial.upgrade_utils import (
46 46 actions as upgrade_actions,
47 47 )
48 48
49 49 from . import (
50 50 lfcommands,
51 51 lfutil,
52 52 storefactory,
53 53 )
54 54
# Short local aliases for the merge actions this module manipulates.
ACTION_ADD = mergestatemod.ACTION_ADD
ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
ACTION_GET = mergestatemod.ACTION_GET
ACTION_KEEP = mergestatemod.ACTION_KEEP
ACTION_REMOVE = mergestatemod.ACTION_REMOVE

# Extension helper used to register every command/function wrapper below.
eh = exthelper.exthelper()

# Context manager that makes repo.status() largefile-aware.
lfstatus = lfutil.lfstatus

# Custom merge action: mark a largefile as removed in the dirstate
# without deleting it from disk (see overridecalculateupdates and
# mergerecordupdates below).
MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
66 66
67 67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
68 68
69 69
def composelargefilematcher(match, manifest):
    """Return a copy of ``match`` restricted to largefiles only.

    A file counts as a largefile when its standin is present in
    ``manifest``.
    """
    matcher = copy.copy(match)

    def islfile(f):
        return lfutil.standin(f) in manifest

    matcher._files = [f for f in matcher._files if islfile(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    basematchfn = matcher.matchfn
    matcher.matchfn = lambda f: islfile(f) and basematchfn(f)
    return matcher
81 81
82 82
def composenormalfilematcher(match, manifest, exclude=None):
    """Return a copy of ``match`` that rejects largefiles and standins.

    ``exclude`` is an optional iterable of additional file names to
    reject.
    """
    excluded = set() if exclude is None else set(exclude)

    matcher = copy.copy(match)

    def isnormal(f):
        # A file is "normal" when it is neither a standin itself, nor
        # has a standin in the manifest, nor was explicitly excluded.
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    matcher._files = [f for f in matcher._files if isnormal(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    basematchfn = matcher.matchfn
    matcher.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return matcher
98 98
99 99
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Add files matched by ``matcher`` as largefiles.

    A file becomes a largefile when --large was given, when its size is
    at least the configured minimum, or when it matches the configured
    largefiles patterns.  Returns a pair ``(added, bad)`` of file-name
    lists.
    """
    large = opts.get('large')
    # Minimum size (in megabytes) above which files are auto-added as
    # largefiles.
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    # Optional pattern-based matcher from the 'largefiles.patterns' config.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Write an empty standin; the real hash is filled in at
                # commit time.
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            # Collect files the underlying add rejected, translated back
            # from their standin names.
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
170 170
171 171
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove the largefiles matched by ``matcher``.

    Warns about (and skips) files that are modified, added, or still
    present, depending on --after.  Returns a non-zero int when any
    file had to be skipped with a warning.
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Keep only files whose standin is in the manifest, i.e. actual
    # tracked largefiles.
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Emit one warning per file; return 1 if anything was warned about.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # Switch from largefile names to their standin names.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result
242 242
243 243
244 244 # For overriding mercurial.hgweb.webcommands so that largefiles will
245 245 # appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    """Map a standin path back to its largefile path for hgweb display."""
    stripped = lfutil.splitstandin(path)
    return stripped or path
249 249
250 250
251 251 # -- Wrappers: modify existing commands --------------------------------
252 252
253 253
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Wrap 'hg add' to reject the contradictory --normal/--large combo."""
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
274 274
275 275
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Wrap cmdutil.add so matching largefiles are added as standins.

    With --normal the override is a no-op and the original add runs
    unchanged.
    """
    if not opts.get('normal'):
        largeadded, largebad = addlargefiles(
            ui, repo, False, matcher, uipathfn, **opts
        )
        # Run the normal add on everything that was not handled as a
        # largefile.
        normalmatcher = composenormalfilematcher(
            matcher, repo[None].manifest(), largeadded
        )
        bad = orig(
            ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts
        )
        bad.extend(largebad)
        return bad
    # The --normal flag short circuits this override.
    return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
290 290
291 291
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Wrap cmdutil.remove so largefiles are removed alongside normals."""
    # First remove the normal files with the original implementation ...
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    # ... then the largefiles; a non-zero status from either side wins.
    largeresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return largeresult or normalresult
314 314
315 315
@eh.wrapfunction(dirstate.dirstate, b'_changing')
@contextlib.contextmanager
def _changing(orig, self, repo, change_type):
    """Wrap dirstate._changing to enter the largefiles dirstate into the
    same "changing" context as the main dirstate.

    While the context is active the lfdirstate is reachable as
    ``self._sub_dirstate``; whatever value was there before (possibly
    None) is restored on exit.
    """
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        lfd = getattr(self, '_large_file_dirstate', False)
        if sub_dirstate is None and not lfd:
            # Only the main dirstate gets a sub-dirstate; the
            # lfdirstate itself (lfd set) must not recurse.
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo, change_type):
            if sub_dirstate is None:
                yield
            else:
                # Enter the same change context on the lfdirstate so both
                # dirstates are scoped to the same change.
                with sub_dirstate._changing(repo, change_type):
                    yield
    finally:
        self._sub_dirstate = pre
335 335
336 336
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefile-aware status enabled."""
    with lfstatus(repo._repo):
        result = orig(repo, rev2, **opts)
    return result
341 341
342 342
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run 'hg status' with largefile-aware status enabled."""
    with lfstatus(repo):
        ret = orig(ui, repo, *pats, **opts)
    return ret
347 347
348 348
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    """Check subrepo dirtiness with largefile-aware status enabled."""
    with lfstatus(repo._repo):
        dirty = orig(repo, ignoreupdate=ignoreupdate, missing=missing)
    return dirty
353 353
354 354
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrap 'hg log' so patterns also match the corresponding standins."""

    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Rewrite one pattern so it targets the standin path instead,
            # preserving any 'kind:' prefix.  Filesets are left alone.
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Match either the file itself or its largefile counterpart.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Case (2): build the diff-file matcher from the unmodified patterns.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
474 474
475 475
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap 'hg verify' to optionally verify largefiles as well."""
    large = opts.pop('large', False)
    allrevs = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or allrevs or contents:
        # Only consult the largefile check when the base verify
        # succeeded (falsy result); otherwise keep its error status.
        result = result or lfcommands.verifylfiles(ui, repo, allrevs, contents)
    return result
508 508
509 509
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrap 'hg debugstate'; with --large, dump the largefiles dirstate."""
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return

    # Present a minimal repo-like object whose dirstate is the largefiles
    # dirstate, so the original command prints that one instead.
    class fakerepo:
        dirstate = lfutil.openlfdirstate(ui, repo)

    orig(ui, fakerepo, *pats, **opts)
524 524
525 525
526 526 # Before starting the manifest merge, merge.updates will call
527 527 # _checkunknownfile to check if there are any files in the merged-in
528 528 # changeset that collide with unknown files in the working copy.
529 529 #
530 530 # The largefiles are seen as unknown, so this prevents us from merging
531 531 # in a file 'foo' if we already have a largefile with the same name.
532 532 #
533 533 # The overridden function filters the unknown files by removing any
534 534 # largefiles. This makes the merge proceed and we can then handle this
535 535 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(
    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
):
    """Treat files that are really largefiles as 'known' during merge."""
    normalized = dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        # A standin exists for this file, so it is not an unknown file
        # that should block the merge.
        return False
    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
543 543
544 544
545 545 # The manifest merge handles conflicts on the manifest level. We want
546 546 # to handle changes in largefile-ness of files at this level too.
547 547 #
548 548 # The strategy is to run the original calculateupdates and then process
549 549 # the action list it outputs. There are two cases we need to deal with:
550 550 #
551 551 # 1. Normal file in p1, largefile in p2. Here the largefile is
552 552 # detected via its standin file, which will enter the working copy
553 553 # with a "get" action. It is not "merge" since the standin is all
554 554 # Mercurial is concerned with at this level -- the link to the
555 555 # existing normal file is not relevant here.
556 556 #
557 557 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
558 558 # since the largefile will be present in the working copy and
559 559 # different from the normal file in p2. Mercurial therefore
560 560 # triggers a merge action.
561 561 #
562 562 # In both cases, we prompt the user and emit new actions to either
563 563 # remove the standin (if the normal file was kept) or to remove the
564 564 # normal file and get the standin (if the largefile was kept). The
565 565 # default prompt answer is to use the largefile version since it was
566 566 # presumably changed on purpose.
567 567 #
568 568 # Finally, the merge.applyupdates function will then take care of
569 569 # writing the files into the working copy and lfcommands.updatelfiles
570 570 # will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process merge actions to resolve largefile/normal-file
    conflicts (see the strategy comment above this function)."""
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        return mresult

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        # Actions planned for the largefile and its standin, if any.
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult
680 680
681 681
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record the custom 'lfmr' action before the standard bookkeeping.

    Files under MERGE_ACTION_LARGEFILE_MARK_REMOVED are marked removed
    in the main dirstate but kept tracked in the lfdirstate, so they
    are not synchronized back as normal files.
    """
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.changing_parents(repo):
            for lfile, args, msg in actions[
                MERGE_ACTION_LARGEFILE_MARK_REMOVED
            ]:
                # this should be executed before 'orig', to execute 'remove'
                # before all other actions
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

    return orig(repo, actions, branchmerge, getfiledata)
700 700
701 701
702 702 # Override filemerge to prompt the user about how they wish to merge
703 703 # largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge two largefile standins by comparing their hashes.

    If the other side did not change relative to the ancestor (or both
    sides made the same change) the local version wins silently;
    otherwise the user is prompted to pick a side.
    """
    # Only handle standins with content on both sides; everything else
    # goes through the normal filemerge machinery.
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    # Hashes of the ancestor, local (dest) and other largefile versions.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    if (
        ohash != ahash
        and ohash != dhash
        and (
            # Take the other side automatically when only it changed;
            # otherwise ask the user (choice 1 == take other).
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False
734 734
735 735
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Wrap pathcopies so standin names are reported as largefile names."""
    copies = orig(ctx1, ctx2, match=match)
    # Translate both sides of every copy record from standin paths back
    # to largefile paths, leaving non-standin paths untouched.
    return {
        lfutil.splitstandin(dst) or dst: lfutil.splitstandin(src) or src
        for dst, src in copies.items()
    }
745 745
746 746
747 747 # Copy first changes the matchers to match standins instead of
748 748 # largefiles. Then it overrides util.copyfile in that function it
749 749 # checks if the destination largefile already exists. It also keeps a
750 750 # list of copied files so that the largefiles can be copied and the
751 751 # dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap cmdutil.copy to also copy/rename largefiles.

    Normal files are processed with the original implementation first,
    then the operation is repeated with the matcher (and util.copyfile)
    redirected at the standins, and finally the largefiles themselves
    are copied/renamed and the lfdirstate updated.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # Restrict scmutil.match to normal (non-large) files only.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            # "no files to copy" just means everything was a largefile;
            # remember that and carry on with the largefile pass.
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute filesystem path of the standin for a cwd-relative path.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                # Only match standins of largefiles accepted by the
                # original matcher.
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # Record each standin copy and refuse to clobber an existing
            # destination largefile unless --force was given.
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                # Translate standin paths back to largefile paths.
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    # Only abort when neither pass found anything to copy.
    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
913 913
914 914
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Wrap cmdutil.revert so reverting works on largefiles.

    Standins are refreshed from the working-copy largefiles, the original
    revert is run against the standins (via a temporarily wrapped
    scmutil.match), and finally the largefiles are regenerated from the
    reverted standins.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        # refresh standins of modified largefiles; drop standins of
        # largefiles deleted from the working copy
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Build a matcher that targets standins instead of largefiles.
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    # known largefile: match its standin instead
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
1000 1000
1001 1001
1002 1002 # after pulling changesets, we need to take some extra care to get
1003 1003 # largefiles updated remotely
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap the pull command to also cache largefiles for --lfrev.

    After the normal pull, largefiles are fetched for every revision
    matched by the --lfrev revsets (--all-largefiles is treated as
    --lfrev "pulled()").
    """
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    # only bother caching when the pull actually added changesets
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # firstpulled only exists while the pulled() revset may run
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1044 1044
1045 1045
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    # stash the resolved --lfrev revisions where exchangepushoperation
    # can pick them up
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        kwargs.setdefault('opargs', {})[b'lfrevs'] = logcmdutil.revrange(
            repo, lfrevs
        )
    return orig(ui, repo, *args, **kwargs)
1065 1065
1066 1066
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Wrap the pushoperation constructor to record the lfrevs parameter."""
    # 'lfrevs' is injected by overridepush and unknown to the original
    # constructor, so strip it out before delegating.
    revs = kwargs.pop('lfrevs', None)
    operation = orig(*args, **kwargs)
    operation.lfrevs = revs
    return operation
1074 1074
1075 1075
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set by overridepull while --lfrev revsets
    # are being evaluated; a missing attribute means misuse.
    missing = object()
    firstpulled = getattr(repo, 'firstpulled', missing)
    if firstpulled is missing:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1101 1101
1102 1102
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject --all-largefiles clones to non-local destinations early."""
    # resolve the destination the same way clone itself would
    target = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(target):
        msg = _(b'--all-largefiles is incompatible with non-local destination %s')
        raise error.Abort(msg % target)

    return orig(ui, source, dest, **opts)
1125 1125
1126 1126
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone to download largefiles when --all-largefiles is set.

    Returns the original (sourcerepo, destrepo) pair, or None when
    --all-largefiles was requested but some largefiles failed to download
    (signalling failure to the caller).
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get(b'all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo)

            if missing != 0:
                return None

    return result
1151 1151
1152 1152
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Wrap the rebase command for largefiles-enabled repositories.

    Installs an automated commit hook and a silent status writer for the
    duration of the rebase, and forces in-memory rebase off (largefiles
    is incompatible with in-memory merging).
    """
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        # pop in reverse order of the appends above
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1169 1169
1170 1170
@eh.extsetup
def overriderebase(ui):
    """At extension setup time, force rebase to run outside in-memory mode."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled; nothing to wrap
        return

    def _dorebase(orig, *args, **kwargs):
        # largefiles cannot merge in memory, so always rebase on disk
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1184 1184
1185 1185
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefile status tracking enabled."""
    # lfstatus must be set on the unfiltered repo for overridearchive to
    # see it, so operate on the unfiltered view throughout
    unfiltered = repo.unfiltered()
    with lfstatus(unfiltered):
        return orig(ui, unfiltered, dest, **opts)
1190 1190
1191 1191
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    # hgweb archive requests: enable largefile expansion while the
    # archive is generated.
    with lfstatus(web.repo):
        return orig(web)
1196 1196
1197 1197
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Re-implementation of archival.archive that expands standins.

    When lfstatus is active, each standin in the archived revision is
    replaced by the actual largefile content looked up in the repo store
    or the system cache; otherwise the original implementation runs.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # add a single member to the archive, honoring the matcher and
        # the decode flag
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # standin: archive the largefile content under its real name
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # rebound 'path' is read immediately by write() below
            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1293 1293
1294 1294
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Subrepo counterpart of overridearchive: expand standins while
    archiving an hg subrepository when largefiles is enabled."""
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE(review): this closes over the loop variable 'f' rather than
        # using 'name'; both are equal at every call site here — confirm
        # before changing.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # standin: archive the largefile content under its real name
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1357 1357
1358 1358
1359 1359 # If a largefile is modified, the change is not reflected in its
1360 1360 # standin until a commit. cmdutil.bailifchanged() raises an exception
1361 1361 # if the repo has uncommitted changes. Wrap it to also check if
1362 1362 # largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort like the wrapped bailifchanged, and additionally abort when
    any largefile has uncommitted changes."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        status = repo.status()
    dirty = (
        status.modified or status.added or status.removed or status.deleted
    )
    if dirty:
        raise error.Abort(_(b'uncommitted changes'))
1370 1370
1371 1371
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    # compute the post-commit status with largefiles (not standins)
    # reported
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1376 1376
1377 1377
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Wrap cmdutil.forget to also forget largefiles.

    Normal files are handled by the original implementation with a
    matcher restricted to non-largefiles; matched largefiles are then
    untracked in the largefiles dirstate and their standins removed.
    Returns the combined (bad, forgot) lists.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # only largefiles that actually have a standin in the manifest
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1428 1428
1429 1429
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    seen = set()
    hashes = set()

    def dedup(fn, lfhash):
        # record each (filename, hash) pair at most once
        pair = (fn, lfhash)
        if pair in seen:
            return
        seen.add(pair)
        hashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if not hashes:
        return
    # ask the remote store, in one batch, which hashes it already has
    existence = storefactory.openstore(repo, other).exists(hashes)
    for fn, lfhash in seen:
        if not existence[lfhash]:  # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1454 1454
1455 1455
def outgoinghook(ui, repo, other, opts, missing):
    """Hook for 'hg outgoing --large': list largefiles that would be
    uploaded for the outgoing changesets in 'missing'.

    In debug mode every hash is printed per file; otherwise only the
    file names and an entity count are shown.
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            # map filename -> list of hashes, so each hash can be shown
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []  # pytype: disable=unsupported-operands
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b'    %s\n' % lfhash)

        else:
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')
1494 1494
1495 1495
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    # Nothing to do here other than add the extra help option- the hook above
    # processes it.
    return orig(*args, **kwargs)
1503 1503
1504 1504
def summaryremotehook(ui, repo, opts, changes):
    """Hook for 'hg summary --large': report largefiles that would be
    uploaded to the remote.

    With changes=None this is the capability query phase; the returned
    pair tells the summary machinery which remote checks are needed.
    """
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )
1537 1537
1538 1538
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    # run summary with largefiles (not standins) reported in the status
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1545 1545
1546 1546
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(
    orig,
    repo,
    matcher,
    prefix,
    uipathfn,
    opts=None,
    open_tr=None,
):
    """Wrap scmutil.addremove to handle largefiles.

    Missing largefiles are removed and new ones added through the
    largefiles code paths; the original addremove then runs with a
    matcher that excludes largefiles. 'open_tr', when given, is called
    before any dirstate change so the associated transaction /
    changing_files context is open first.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s, mtime_boundary = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # open the transaction and changing_files context
    if open_tr is not None:
        open_tr()

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)

    return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1596 1609
1597 1610
1598 1611 # Calling purge with --all will cause the largefiles to be deleted.
1599 1612 # Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap the purge command so tracked largefiles are not deleted.

    repo.status is temporarily replaced by a version that filters out
    largefiles tracked in the largefiles dirstate from the unknown and
    ignored lists, then restored afterwards.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # drop tracked largefiles from the lists purge would delete
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1640 1653
1641 1654
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Wrap rollback to restore standin files to their pre-rollback state.

    After a rollback that moved the dirstate parents, standins are
    rewritten from the new '.' revision and standins that no longer
    belong to any tracked largefile (orphans) are removed from the
    working directory.
    """
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins tracked before the rollback; survivors are discarded
        # from this set below, the rest are orphans
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

    return result
1673 1686
1674 1687
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with largefiles commit hooks installed."""
    resuming = opts.get('continue')
    # silence per-file largefile status output and auto-update standins
    # while transplant creates its commits
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        # pop in reverse order of the appends above
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1686 1699
1687 1700
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Reimplementation of the cat command that knows about largefiles.

    The matcher is patched so a request for a largefile resolves to its
    standin; when a standin is hit, the actual largefile content is
    emitted from the user cache (downloading it on demand if needed).
    Returns 0 if at least one file was written, 1 otherwise.
    """
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # also match a largefile name through its standin
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # suppress 'no such file' for names resolved via standins
        if not f in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        # always descend into the standin directory
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # standin matched: stream the largefile from the cache
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1758 1771
1759 1772
@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap merge._update to keep standins and largefiles synchronized.

    Before the update, standins are refreshed from the working copy and
    clean largefiles marked possibly-dirty (crash safety); afterwards
    the largefiles matching changed standins are updated.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                # 'unsure' file turned out to be clean after rehashing
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.set_possibly_dirty(lfile)
        lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        with lfdirstate.changing_parents(repo):
            result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

    return result
1846 1859
1847 1860
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marktouched, refresh any largefiles whose standins were
    among the touched files."""
    result = orig(repo, files, *args, **kwargs)

    # collect the largefiles corresponding to touched standins
    touched = [
        lf for lf in map(lfutil.splitstandin, files) if lf is not None
    ]
    if touched:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=touched,
            printmessage=False,
            normallookup=True,
        )

    return result
1867 1880
1868 1881
@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Keep the 'largefiles' requirement across repository upgrades."""
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1876 1889
1877 1890
# URL scheme used to address a largefile by its hash
_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    """Open 'largefile://<id>' URLs from the largefile store; delegate
    every other URL to the original opener."""
    if url_.startswith(_lfscheme):
        if data:
            # POST-style data makes no sense for a store lookup
            msg = b"cannot use data on a 'largefile://' url"
            raise error.ProgrammingError(msg)
        lfid = url_[len(_lfscheme) :]
        return storefactory.getlfile(ui, lfid)
    else:
        return orig(ui, url_, data=data, **kwargs)
@@ -1,4009 +1,4102 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import copy as copymod
10 10 import errno
11 11 import os
12 12 import re
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullrev,
18 18 short,
19 19 )
20 20 from .pycompat import (
21 21 getattr,
22 22 open,
23 23 setattr,
24 24 )
25 25 from .thirdparty import attr
26 26
27 27 from . import (
28 28 bookmarks,
29 29 changelog,
30 30 copies,
31 31 crecord as crecordmod,
32 dirstateguard,
33 32 encoding,
34 33 error,
35 34 formatter,
36 35 logcmdutil,
37 36 match as matchmod,
38 37 merge as mergemod,
39 38 mergestate as mergestatemod,
40 39 mergeutil,
41 40 obsolete,
42 41 patch,
43 42 pathutil,
44 43 phases,
45 44 pycompat,
46 45 repair,
47 46 revlog,
48 47 rewriteutil,
49 48 scmutil,
50 49 state as statemod,
51 50 subrepoutil,
52 51 templatekw,
53 52 templater,
54 53 util,
55 54 vfs as vfsmod,
56 55 )
57 56
58 57 from .utils import (
59 58 dateutil,
60 59 stringutil,
61 60 )
62 61
63 62 from .revlogutils import (
64 63 constants as revlog_constants,
65 64 )
66 65
67 66 if pycompat.TYPE_CHECKING:
68 67 from typing import (
69 68 Any,
70 69 Dict,
71 70 )
72 71
73 72 for t in (Any, Dict):
74 73 assert t
75 74
76 75 stringio = util.stringio
77 76
78 77 # templates of common command options
79 78
80 79 dryrunopts = [
81 80 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
82 81 ]
83 82
84 83 confirmopts = [
85 84 (b'', b'confirm', None, _(b'ask before applying actions')),
86 85 ]
87 86
88 87 remoteopts = [
89 88 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
90 89 (
91 90 b'',
92 91 b'remotecmd',
93 92 b'',
94 93 _(b'specify hg command to run on the remote side'),
95 94 _(b'CMD'),
96 95 ),
97 96 (
98 97 b'',
99 98 b'insecure',
100 99 None,
101 100 _(b'do not verify server certificate (ignoring web.cacerts config)'),
102 101 ),
103 102 ]
104 103
105 104 walkopts = [
106 105 (
107 106 b'I',
108 107 b'include',
109 108 [],
110 109 _(b'include names matching the given patterns'),
111 110 _(b'PATTERN'),
112 111 ),
113 112 (
114 113 b'X',
115 114 b'exclude',
116 115 [],
117 116 _(b'exclude names matching the given patterns'),
118 117 _(b'PATTERN'),
119 118 ),
120 119 ]
121 120
122 121 commitopts = [
123 122 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
124 123 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
125 124 ]
126 125
127 126 commitopts2 = [
128 127 (
129 128 b'd',
130 129 b'date',
131 130 b'',
132 131 _(b'record the specified date as commit date'),
133 132 _(b'DATE'),
134 133 ),
135 134 (
136 135 b'u',
137 136 b'user',
138 137 b'',
139 138 _(b'record the specified user as committer'),
140 139 _(b'USER'),
141 140 ),
142 141 ]
143 142
144 143 commitopts3 = [
145 144 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
146 145 (b'U', b'currentuser', None, _(b'record the current user as committer')),
147 146 ]
148 147
149 148 formatteropts = [
150 149 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
151 150 ]
152 151
153 152 templateopts = [
154 153 (
155 154 b'',
156 155 b'style',
157 156 b'',
158 157 _(b'display using template map file (DEPRECATED)'),
159 158 _(b'STYLE'),
160 159 ),
161 160 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
162 161 ]
163 162
164 163 logopts = [
165 164 (b'p', b'patch', None, _(b'show patch')),
166 165 (b'g', b'git', None, _(b'use git extended diff format')),
167 166 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
168 167 (b'M', b'no-merges', None, _(b'do not show merges')),
169 168 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
170 169 (b'G', b'graph', None, _(b"show the revision DAG")),
171 170 ] + templateopts
172 171
173 172 diffopts = [
174 173 (b'a', b'text', None, _(b'treat all files as text')),
175 174 (
176 175 b'g',
177 176 b'git',
178 177 None,
179 178 _(b'use git extended diff format (DEFAULT: diff.git)'),
180 179 ),
181 180 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
182 181 (b'', b'nodates', None, _(b'omit dates from diff headers')),
183 182 ]
184 183
185 184 diffwsopts = [
186 185 (
187 186 b'w',
188 187 b'ignore-all-space',
189 188 None,
190 189 _(b'ignore white space when comparing lines'),
191 190 ),
192 191 (
193 192 b'b',
194 193 b'ignore-space-change',
195 194 None,
196 195 _(b'ignore changes in the amount of white space'),
197 196 ),
198 197 (
199 198 b'B',
200 199 b'ignore-blank-lines',
201 200 None,
202 201 _(b'ignore changes whose lines are all blank'),
203 202 ),
204 203 (
205 204 b'Z',
206 205 b'ignore-space-at-eol',
207 206 None,
208 207 _(b'ignore changes in whitespace at EOL'),
209 208 ),
210 209 ]
211 210
212 211 diffopts2 = (
213 212 [
214 213 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
215 214 (
216 215 b'p',
217 216 b'show-function',
218 217 None,
219 218 _(
220 219 b'show which function each change is in (DEFAULT: diff.showfunc)'
221 220 ),
222 221 ),
223 222 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
224 223 ]
225 224 + diffwsopts
226 225 + [
227 226 (
228 227 b'U',
229 228 b'unified',
230 229 b'',
231 230 _(b'number of lines of context to show'),
232 231 _(b'NUM'),
233 232 ),
234 233 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
235 234 (
236 235 b'',
237 236 b'root',
238 237 b'',
239 238 _(b'produce diffs relative to subdirectory'),
240 239 _(b'DIR'),
241 240 ),
242 241 ]
243 242 )
244 243
245 244 mergetoolopts = [
246 245 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
247 246 ]
248 247
249 248 similarityopts = [
250 249 (
251 250 b's',
252 251 b'similarity',
253 252 b'',
254 253 _(b'guess renamed files by similarity (0<=s<=100)'),
255 254 _(b'SIMILARITY'),
256 255 )
257 256 ]
258 257
259 258 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
260 259
261 260 debugrevlogopts = [
262 261 (b'c', b'changelog', False, _(b'open changelog')),
263 262 (b'm', b'manifest', False, _(b'open manifest')),
264 263 (b'', b'dir', b'', _(b'open directory manifest')),
265 264 ]
266 265
267 266 # special string such that everything below this line will be ingored in the
268 267 # editor text
269 268 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
270 269
271 270
def check_at_most_one_arg(opts, *args):
    """abort if more than one of the arguments are in opts

    Returns the unique argument or None if none of them were specified.
    """

    def to_display(name):
        # option names use '-' on the command line but '_' in opts keys
        return pycompat.sysbytes(name).replace(b'_', b'-')

    winner = None
    for candidate in args:
        if not opts.get(candidate):
            continue
        if winner:
            raise error.InputError(
                _(b'cannot specify both --%s and --%s')
                % (to_display(winner), to_display(candidate))
            )
        winner = candidate
    return winner
291 290
292 291
def check_incompatible_arguments(opts, first, others):
    """abort if the first argument is given along with any of the others

    Unlike check_at_most_one_arg(), `others` are not mutually exclusive
    among themselves, and they're passed as a single collection.
    """
    for incompatible in others:
        check_at_most_one_arg(opts, first, incompatible)
301 300
302 301
def resolve_commit_options(ui, opts):
    """modify commit options dict to handle related options

    The return value indicates that ``rewrite.update-timestamp`` is the reason
    the ``date`` option is set.
    """
    check_at_most_one_arg(opts, 'date', 'currentdate')
    check_at_most_one_arg(opts, 'user', 'currentuser')

    # date-only change should be ignored?
    datemaydiffer = False

    if opts.get('currentdate'):
        opts['date'] = b'%d %d' % dateutil.makedate()
    else:
        implicit_date = (
            not opts.get('date')
            and ui.configbool(b'rewrite', b'update-timestamp')
            and opts.get('currentdate') is None
        )
        if implicit_date:
            opts['date'] = b'%d %d' % dateutil.makedate()
            datemaydiffer = True

    if opts.get('currentuser'):
        opts['user'] = ui.username()

    return datemaydiffer
328 327
329 328
def check_note_size(opts):
    """make sure note is of valid format"""
    note = opts.get('note')
    if note:
        # the note is stored in changeset extras: keep it short and one-line
        if len(note) > 255:
            raise error.InputError(
                _(b"cannot store a note of more than 255 bytes")
            )
        if b'\n' in note:
            raise error.InputError(_(b"note cannot contain a newline"))
341 340
342 341
def ishunk(x):
    """Return True if ``x`` is a record hunk (plain or curses flavor)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
346 345
347 346
def isheader(x):
    """Return True if ``x`` is a patch header (plain or curses flavor)."""
    return isinstance(x, (crecordmod.uiheader, patch.header))
351 350
352 351
def newandmodified(chunks):
    """Return (files newly added and modified, files to also restore).

    Headers marking a new file contribute that file to the first set; any
    other file mentioned by such a header (e.g. a rename source) goes into
    the second set so the patch can be re-applied later.
    """
    newfiles = set()
    alsorestore = set()
    for c in chunks:
        if isheader(c) and c.isnewfile():
            fname = c.filename()
            newfiles.add(fname)
            alsorestore.update(set(c.files()) - {fname})
    return newfiles, alsorestore
361 360
362 361
def parsealiases(cmd):
    """Expand a ``name|alias|...`` command spec into a list of aliases.

    Dash-folded variants (``foo-bar`` -> ``foobar``) are appended when not
    already present, preserving the original order.
    """
    aliases = cmd.split(b"|")
    seen = set(aliases)
    folded = []
    for name in aliases:
        if b'-' not in name:
            continue
        compact = name.replace(b'-', b'')
        if compact not in seen:
            seen.add(compact)
            folded.append(compact)
    aliases.extend(folded)
    return aliases
375 374
376 375
def setupwrapcolorwrite(ui):
    """Monkey-patch ``ui.write`` so diff output gets labeled/colorized.

    Returns the original write method so the caller can restore it.
    """
    oldwrite = ui.write

    def labeledwrite(*args, **kwargs):
        label = kwargs.pop('label', b'')
        # difflabel yields (chunk, label-suffix) pairs for diff coloring
        for chunk, chunklabel in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + chunklabel)

    setattr(ui, 'write', labeledwrite)
    return oldwrite
391 390
392 391
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Let the user pick hunks, via curses when enabled, falling back to
    the plain-text interface when curses bails out."""
    if usecurses:
        try:
            if testfile:
                chooser = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
            else:
                chooser = crecordmod.chunkselector
            return crecordmod.filterpatch(ui, originalhunks, chooser, operation)
        except crecordmod.fallbackerror as e:
            ui.warn(b'%s\n' % e)
            ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
411 410
412 411
def recordfilter(ui, originalhunks, match, operation=None):
    """Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used for to build ui messages to indicate the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    use_curses = crecordmod.checkcurses(ui)
    testfile = ui.config(b'experimental', b'crecordtest')
    unwrapped_write = setupwrapcolorwrite(ui)
    try:
        return filterchunks(
            ui, originalhunks, use_curses, testfile, match, operation
        )
    finally:
        # always undo the ui.write monkey-patching, even on error
        ui.write = unwrapped_write
430 429
431 430
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Interactively select changes (via ``filterfn``) and commit them
    with ``commitfunc``.

    ``cmdsuggest`` names the command to suggest when the ui is not
    interactive; ``backupall`` forces backing up every changed file
    instead of only the ones being partially committed.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.InputError(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.InputError(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            # turn bad-file warnings into hard errors unless --force
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        original_headers = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, original_headers, match)
        except error.PatchParseError as err:
            raise error.InputError(_(b'error parsing patch: %s') % err)
        except error.PatchApplicationError as err:
            raise error.StateError(_(b'error applying patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks)
        contenders = set()
        for h in chunks:
            if isheader(h):
                contenders.update(set(h.files()))

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except FileExistsError:
                pass
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=os.path.basename(f) + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.revert_to(repo[b'.'], matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchParseError as err:
                    raise error.InputError(pycompat.bytestr(err))
                except error.PatchApplicationError as err:
                    raise error.StateError(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.items():
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate.get_entry(realname).maybe_clean:
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified

                        # XXX-PENDINGCHANGE: We should clarify the context in
                        # which this function is called to make sure it
                        # already called within a `pendingchange`, However we
                        # are taking a shortcut here in order to be able to
                        # quickly deprecated the older API.
                        with dirstate.changing_parents(repo):
                            dirstate.update_file(
                                realname,
                                p1_tracked=True,
                                wc_tracked=True,
                                possibly_dirty=True,
                            )

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup: a failed restore must not mask the
                # commit result/exception already in flight
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
666 665
667 666
class dirnode:
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct child of this directory

    subdirs is a dictionary of sub-directory name as the key and it's own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        # directory path without a trailing '/'
        self.path = dirpath
        # single-letter status codes seen anywhere under this directory
        self.statuses = set()
        # (filename, status) pairs for direct children only
        self.files = []
        # subdir name -> dirnode
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not direct child of this directory, we traverse to the
        directory of which this file is a direct child of and add the file
        there.
        """

        # the filename contains a path separator, it means it's not the direct
        # child of this directory
        if b'/' in filename:
            subdir, filep = filename.split(b'/', 1)

            # does the dirnode object for subdir exists
            if subdir not in self.subdirs:
                subdirpath = pathutil.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, pathutil.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to terse
        that status. -> yield (status, dirpath). dirpath will end in '/'.

        2) Otherwise, we do following:

                a) Yield (status, filepath) for all the files which are in this
                directory (only the ones in this directory, not the subdirs)

                b) Recurse the function on all the subdirectories of this
                directory
        """

        if len(self.statuses) == 1:
            # NOTE(review): pop() consumes the lone status, so tersewalk is
            # effectively single-use per node — confirm callers only walk once
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + b'/'
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
767 766
768 767
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.
    """
    # the order matters here as that is used to produce final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # validate the --terse argument characters up front
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.InputError(_(b"'%s' not recognized") % s)

    status_attrs = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    # build the directory tree rooted at the repo root, and prepare one
    # output bucket per status abbreviation
    root = dirnode(b'')
    buckets = {}
    for attrname in status_attrs:
        abbrev = attrname[0:1]
        buckets[abbrev] = []
        for path in getattr(statuslist, attrname):
            root.addfile(path, abbrev)

    # the root dir itself is never tersed, so its direct files go in as-is
    for st, fpath in root.iterfilepaths():
        buckets[st].append(fpath)

    # walk each subtree, tersing where possible
    for subdir in root.subdirs.values():
        for st, fpath in subdir.tersewalk(terseargs):
            buckets[st].append(fpath)

    return scmutil.status(*[sorted(buckets[st]) for st in allst])
824 823
825 824
826 825 def _commentlines(raw):
827 826 '''Surround lineswith a comment char and a new line'''
828 827 lines = raw.splitlines()
829 828 commentedlines = [b'# %s' % line for line in lines]
830 829 return b'\n'.join(commentedlines) + b'\n'
831 830
832 831
@attr.s(frozen=True)
class morestatus:
    """Extra information shown by `hg status` when the repository is in an
    unfinished state (merge, rebase, ...): the unfinished operation, an
    optional hint message, and the list of unresolved merge conflicts."""

    repo = attr.ib()
    unfinishedop = attr.ib()
    unfinishedmsg = attr.ib()
    activemerge = attr.ib()
    unresolvedpaths = attr.ib()
    # Use a factory so each instance gets its own set. A plain
    # ``default=set()`` would share one mutable set across *all* morestatus
    # instances, leaking "already formatted" paths between unrelated status
    # invocations (e.g. in a long-lived command-server process).
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        """Record that ``path`` was emitted and flag it when unresolved."""
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        """Emit the morestatus footer (state message, conflicts, hint)."""
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

        if self.unfinishedop:
            fm.data(unfinished=self.unfinishedop)
            statemsg = (
                _(b'The repository is in an unfinished *%s* state.')
                % self.unfinishedop
            )
            fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
        if self.unfinishedmsg:
            fm.data(unfinishedmsg=self.unfinishedmsg)

        # May also start new data items.
        self._formatconflicts(fm)

        if self.unfinishedmsg:
            fm.plain(
                b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
            )

    def _formatconflicts(self, fm):
        """Emit unresolved-conflict details when a merge is active."""
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b'    %s'
                    % util.pathto(self.repo.root, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    b'''Unresolved merge conflicts:

%s

To mark files as resolved:  hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                fm.context(repo=self.repo)
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
911 910
912 911
def readmorestatus(repo):
    """Returns a morestatus object if the repo has unfinished state."""
    statetuple = statemod.getrepostate(repo)
    mergestate = mergestatemod.mergestate.read(repo)
    activemerge = mergestate.active()
    if not (statetuple or activemerge):
        return None

    unfinishedop = unfinishedmsg = None
    if statetuple:
        unfinishedop, unfinishedmsg = statetuple
    unresolved = sorted(mergestate.unresolved()) if activemerge else None
    return morestatus(
        repo, unfinishedop, unfinishedmsg, activemerge, unresolved
    )
929 928
930 929
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            matched = cmd
        elif strict:
            matched = None
        else:
            # unambiguous-prefix matching: first alias that starts with cmd
            matched = next((a for a in aliases if a.startswith(cmd)), None)
        if matched is None:
            continue
        if aliases[0].startswith(b"debug") or matched.startswith(b"debug"):
            debugchoice[matched] = (aliases, table[entry])
        else:
            choice[matched] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
968 967
969 968
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return next(iter(choice.values()))

    raise error.UnknownCommand(cmd, allcmds)
985 984
986 985
def changebranch(ui, repo, revs, label, opts):
    """Change the branch name of given revs to label

    Each changeset in ``revs`` is rewritten (as a memctx) with the new
    branch, the originals are replaced via ``scmutil.cleanupnodes`` (which
    creates obsmarkers and moves bookmarks), and the working copy is
    updated onto the rewrite of its parent when applicable.

    Raises InputError for an empty/non-linear revision set, a pre-existing
    branch name (without --force), or revs in the middle of a stack.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = logcmdutil.revrange(repo, revs)
        if not revs:
            raise error.InputError(b"empty revision set")
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.InputError(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # branches of the root's parents: moving onto one of those is
        # always allowed, even if the name already exists elsewhere
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.InputError(
                _(b"a branch of the same name already exists")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.InputError(
                _(b"cannot change branch in middle of a stack")
            )

        # old node -> (new node,) for every rewritten changeset
        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            # reads file contents from the original ctx; files absent from
            # its manifest (i.e. removed in this revision) map to None.
            # NOTE: closes over the loop's ctx, which is safe because
            # commitctx() consumes it within the same iteration.
            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1088 1087
1089 1088
def findrepo(p):
    """Walk up from directory *p* looking for a Mercurial repository.

    Returns the first ancestor of *p* (including *p* itself) containing a
    ``.hg`` directory, or None if the filesystem root is reached first.
    """
    current = p
    while not os.path.isdir(os.path.join(current, b".hg")):
        parent = os.path.dirname(current)
        if parent == current:
            # dirname() is a fixed point at the root: nothing found
            return None
        current = parent
    return current
1097 1096
1098 1097
def bailifchanged(repo, merge=True, hint=None):
    """Abort unless the working directory is clean.

    Pass merge=False to tolerate a pending uncommitted merge (as
    'update --check' does).  *hint* is forwarded to the StateError.
    Subrepositories are checked recursively.
    """
    if merge:
        if repo.dirstate.p2() != repo.nullid:
            raise error.StateError(
                _(b'outstanding uncommitted merge'), hint=hint
            )
    status = repo.status()
    dirty = (
        status.modified or status.added or status.removed or status.deleted
    )
    if dirty:
        raise error.StateError(_(b'uncommitted changes'), hint=hint)
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1116 1115
1117 1116
def logmessage(ui, opts):
    """Return the commit message selected by the -m/-l options.

    -m wins over -l; with -l the named file (or stdin for '-') is read and
    its line endings normalized to '\\n'.  Returns None when neither option
    was given.  Passing both options raises (check_at_most_one_arg).
    """
    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')
    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize platform line endings
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1138 1137
1139 1138
def mergeeditform(ctxorbool, baseformname):
    """Return the editform name used for committemplate lookup.

    *ctxorbool* is either the ctx being committed or a bool stating
    whether a merge is being committed.  Appends '.merge' to
    *baseformname* for merges and '.normal' otherwise.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    suffix = b".merge" if ismerge else b".normal"
    return baseformname + suffix
1156 1155
1157 1156
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """Select the commit-message editor implied by '--edit' and friends.

    'finishdesc' is called with the edited message just after editing,
    before the empty-ness check; its return value is what gets stored in
    history.

    'extramsg' replaces the 'Leave message empty to abort commit' line in
    the editor ('HG: ' prefix and EOL are added automatically).

    'editform' is a dot-separated list of names distinguishing the purpose
    of the commit text editing.

    Whenever 'finishdesc' or 'extramsg' is given the forcing editor is
    returned regardless of 'edit' — MQ depends on that behavior.
    """
    if edit or finishdesc or extramsg:

        def forcingeditor(r, c, s):
            return commitforceeditor(
                r,
                c,
                s,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forcingeditor
    if editform:

        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)

        return formeditor
    return commiteditor
1188 1187
1189 1188
def _escapecommandtemplate(tmpl):
    """Escape the literal segments of template *tmpl*.

    Only spans scanned as b'string' are passed through
    stringutil.escapestr(); template constructs are kept verbatim.
    """
    pieces = []
    for kind, begin, finish in templater.scantemplate(tmpl, raw=True):
        span = tmpl[begin:finish]
        if kind == b'string':
            span = stringutil.escapestr(span)
        pieces.append(span)
    return b''.join(pieces)
1198 1197
1199 1198
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    # escape literal parts first so backslashes survive template expansion
    escaped = _escapecommandtemplate(tmpl)
    return formatter.maketemplater(ui, escaped).renderdefault(props)
1217 1216
1218 1217
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return templ.renderdefault(mapping)
1234 1233
1235 1234
def format_changeset_summary(ui, ctx, command=None, default_spec=None):
    """Format a changeset summary (one line).

    The template spec is looked up, in order, in the config keys
    'command-templates.oneline-summary.<command>' and
    'command-templates.oneline-summary', then *default_spec*, and finally
    a built-in template.  Only the first rendered line is returned.
    """
    spec = None
    if command:
        spec = ui.config(
            b'command-templates', b'oneline-summary.%s' % command, None
        )
    spec = spec or ui.config(b'command-templates', b'oneline-summary')
    spec = spec or default_spec
    if not spec:
        spec = (
            b'{separate(" ", '
            b'label("oneline-summary.changeset", "{rev}:{node|short}")'
            b', '
            b'join(filter(namespaces % "{ifeq(namespace, "branches", "", join(names % "{label("oneline-summary.{namespace}", name)}", " "))}"), " ")'
            b')} '
            b'"{label("oneline-summary.desc", desc|firstline)}"'
        )
    rendered = rendertemplate(ctx, spec)
    return rendered.split(b'\n', 1)[0]
1258 1257
1259 1258
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # mapping from %-escape character to template fragment; entries for
    # %N/%n/%s/%d/%p are only valid when the caller supplied the
    # corresponding keyword argument
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # when both are known, zero-pad %n to the width of the total count
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    # only literal (b'string') segments are scanned for %-escapes; existing
    # template constructs in the pattern are copied through untouched
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more escapes: escape the rest of this literal segment
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1329 1328
1330 1329
def makefilename(ctx, pat, **props):
    """Expand the old-style filename pattern *pat* for changeset *ctx*.

    An empty pattern is returned unchanged; otherwise the pattern is
    converted to a template and rendered with *props*.
    """
    if not pat:
        return pat
    tmpl = _buildfntemplate(pat, **props)
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
1339 1338
1340 1339
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == b'-'
1344 1343
1345 1344
class _unclosablefile:
    """Proxy around a file object whose close() is a deliberate no-op.

    Used to hand out long-lived streams (such as ui.fin/ui.fout) to
    callers that may try to close them; everything but close() and the
    context-manager exit is delegated to the wrapped object.
    """

    def __init__(self, fp):
        self._fp = fp

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # do not close the underlying stream on context exit either
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # delegate everything not defined here to the wrapped file
        return getattr(self._fp, attr)

    def close(self):
        # intentionally empty: the underlying stream outlives this wrapper
        pass
1364 1363
1365 1364
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open the file named by pattern *pat*, or a stdio stream for '-'.

    For a stdio pattern an unclosable wrapper around the repo ui's fout
    (write modes) or fin (read modes) is returned; otherwise the expanded
    filename is opened with *mode*.
    """
    if isstdiofilename(pat):
        ui = ctx.repo().ui
        if mode in (b'r', b'rb'):
            stream = ui.fin
        else:
            stream = ui.fout
        return _unclosablefile(stream)
    return open(makefilename(ctx, pat, **props), mode)
1378 1377
1379 1378
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    Which storage is opened is selected by the --changelog/--manifest/--dir
    flags in ``opts`` or by ``file_``; conflicting combinations raise
    InputError.  With ``returnrevlog=True`` non-revlog storage is unwrapped
    via its ``_revlog`` attribute, and a bare on-disk revlog path (ending
    in a two-character suffix, e.g. '.i') is also accepted.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    msg = None
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.InputError(msg)

    r = None
    if repo:
        if cl:
            # use the unfiltered changelog so hidden revisions stay visible
            r = repo.unfiltered().changelog
        elif dir:
            if not scmutil.istreemanifest(repo):
                raise error.InputError(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            # only use the dirlog if it actually has revisions
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.InputError(
                _(b'%r does not appear to be a revlog') % r
            )

    if not r:
        if not returnrevlog:
            raise error.InputError(_(b'cannot give path to non-revlog'))

        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.InputError(_(b"revlog '%s' not found") % file_)

        # fall back to opening the path as a free-form on-disk revlog;
        # radix strips the 2-character extension (e.g. '.i')
        target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False),
            target=target,
            radix=file_[:-2],
        )
    return r
1453 1452
1454 1453
def openrevlog(repo, cmd, file_, opts):
    """Return the revlog backing storage of an item.

    Thin wrapper around ``openstorage()`` that forces
    ``returnrevlog=True``.  Most callers want the main storage object, not
    the revlog behind it; use this only from code that must inspect
    low-level revlog implementation details (e.g. debug commands).
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1466 1465
1467 1466
def copy(ui, repo, pats, opts, rename=False):
    """Implementation of 'hg copy'/'hg rename' (and their --forget mode).

    The last element of *pats* is the destination; the rest are sources.
    With --at-rev the (un)marking is applied by rewriting that revision;
    otherwise the working directory and dirstate are updated.  Returns
    True when at least one file failed to be copied/renamed (warnings are
    emitted per file), None on the early-return paths.
    """
    check_incompatible_arguments(opts, b'forget', [b'dry_run'])

    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    # abstarget -> abssrc for copies recorded so far (collision detection)
    targets = {}
    forget = opts.get(b"forget")
    after = opts.get(b"after")
    dryrun = opts.get(b"dry_run")
    rev = opts.get(b'at_rev')
    if rev:
        if not forget and not after:
            # TODO: Remove this restriction and make it also create the copy
            # targets (and remove the rename source if rename==True).
            raise error.InputError(_(b'--at-rev requires --after'))
        ctx = logcmdutil.revsingle(repo, rev)
        if len(ctx.parents()) > 1:
            raise error.InputError(
                _(b'cannot mark/unmark copy in merge commit')
            )
    else:
        ctx = repo[None]

    pctx = ctx.p1()

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    if forget:
        # --forget: unmark copy information, either in the working copy
        # (ctx.rev() is None) or by rewriting the given revision
        if ctx.rev() is None:
            new_ctx = ctx
        else:
            if len(ctx.parents()) > 1:
                raise error.InputError(_(b'cannot unmark copy in merge commit'))
            # avoid cycle context -> subrepo -> cmdutil
            from . import context

            rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
            new_ctx = context.overlayworkingctx(repo)
            new_ctx.setbase(ctx.p1())
            mergemod.graft(repo, ctx, wctx=new_ctx)

        match = scmutil.match(ctx, pats, opts)

        current_copies = ctx.p1copies()
        current_copies.update(ctx.p2copies())

        uipathfn = scmutil.getuipathfn(repo)
        for f in ctx.walk(match):
            if f in current_copies:
                new_ctx[f].markcopied(None)
            elif match.exact(f):
                ui.warn(
                    _(
                        b'%s: not unmarking as copy - file is not marked as copied\n'
                    )
                    % uipathfn(f)
                )

        if ctx.rev() is not None:
            with repo.lock():
                mem_ctx = new_ctx.tomemctx_for_amend(ctx)
                new_node = mem_ctx.commit()

                # keep the dirstate parent in sync with the rewrite
                if repo.dirstate.p1() == ctx.node():
                    with repo.dirstate.changing_parents(repo):
                        scmutil.movedirstate(repo, repo[new_node])
                replacements = {ctx.node(): [new_node]}
                scmutil.cleanupnodes(
                    repo, replacements, b'uncopy', fixphase=True
                )

        return

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.InputError(_(b'no source or destination specified'))
    if len(pats) == 1:
        raise error.InputError(_(b'no destination specified'))
    # the last pattern is the destination; the rest are sources
    dest = pats.pop()

    def walkpat(pat):
        # resolve one source pattern into a list of (abs, rel, exact)
        # tuples, warning about unmanaged/removed files on exact matches
        srcs = []
        # TODO: Inline and simplify the non-working-copy version of this code
        # since it shares very little with the working-copy version of it.
        ctx_to_walk = ctx if ctx.rev() is None else pctx
        m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
        for abs in ctx_to_walk.walk(m):
            rel = uipathfn(abs)
            exact = m.exact(abs)
            if abs not in ctx:
                if abs in pctx:
                    if not after:
                        if exact:
                            ui.warn(
                                _(
                                    b'%s: not copying - file has been marked '
                                    b'for remove\n'
                                )
                                % rel
                            )
                        continue
                else:
                    if exact:
                        ui.warn(
                            _(b'%s: not copying - file is not managed\n') % rel
                        )
                    continue

            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    if ctx.rev() is not None:
        # --at-rev (without --forget): record the copy by rewriting the
        # target revision instead of touching the working directory
        rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
        absdest = pathutil.canonpath(repo.root, cwd, dest)
        if ctx.hasdir(absdest):
            raise error.InputError(
                _(b'%s: --at-rev does not support a directory as destination')
                % uipathfn(absdest)
            )
        if absdest not in ctx:
            raise error.InputError(
                _(b'%s: copy destination does not exist in %s')
                % (uipathfn(absdest), ctx)
            )

        # avoid cycle context -> subrepo -> cmdutil
        from . import context

        copylist = []
        for pat in pats:
            srcs = walkpat(pat)
            if not srcs:
                continue
            for abs, rel, exact in srcs:
                copylist.append(abs)

        if not copylist:
            raise error.InputError(_(b'no files to copy'))
        # TODO: Add support for `hg cp --at-rev . foo bar dir` and
        # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
        # existing functions below.
        if len(copylist) != 1:
            raise error.InputError(_(b'--at-rev requires a single source'))

        new_ctx = context.overlayworkingctx(repo)
        new_ctx.setbase(ctx.p1())
        mergemod.graft(repo, ctx, wctx=new_ctx)

        new_ctx.markcopied(absdest, copylist[0])

        with repo.lock():
            mem_ctx = new_ctx.tomemctx_for_amend(ctx)
            new_node = mem_ctx.commit()

            if repo.dirstate.p1() == ctx.node():
                with repo.dirstate.changing_parents(repo):
                    scmutil.movedirstate(repo, repo[new_node])
            replacements = {ctx.node(): [new_node]}
            scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)

        return

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy/rename one file in the working directory; returns True on
        # failure (after warning), falsy on success
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if b'/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit(b'/', 1)
            abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        entry = repo.dirstate.get_entry(abstarget)

        already_commited = entry.tracked and not entry.added

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(
                _(b'%s: not overwriting - %s collides with %s\n')
                % (
                    reltarget,
                    repo.pathto(abssrc, cwd),
                    repo.pathto(prevsrc, cwd),
                )
            )
            return True  # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
                abstarget
            ):
                if not rename:
                    ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
                    return True  # report a failure
                exists = False
                samefile = True

        # i.e. (not after and exists) or (after and already_commited)
        if not after and exists or after and already_commited:
            if not opts[b'force']:
                if already_commited:
                    msg = _(b'%s: not overwriting - file already committed\n')
                    # Check if if the target was added in the parent and the
                    # source already existed in the grandparent.
                    looks_like_copy_in_pctx = abstarget in pctx and any(
                        abssrc in gpctx and abstarget not in gpctx
                        for gpctx in pctx.parents()
                    )
                    if looks_like_copy_in_pctx:
                        if rename:
                            hint = _(
                                b"('hg rename --at-rev .' to record the rename "
                                b"in the parent of the working copy)\n"
                            )
                        else:
                            hint = _(
                                b"('hg copy --at-rev .' to record the copy in "
                                b"the parent of the working copy)\n"
                            )
                    else:
                        if after:
                            flags = b'--after --force'
                        else:
                            flags = b'--force'
                        if rename:
                            hint = (
                                _(
                                    b"('hg rename %s' to replace the file by "
                                    b'recording a rename)\n'
                                )
                                % flags
                            )
                        else:
                            hint = (
                                _(
                                    b"('hg copy %s' to replace the file by "
                                    b'recording a copy)\n'
                                )
                                % flags
                            )
                else:
                    msg = _(b'%s: not overwriting - file exists\n')
                    if rename:
                        hint = _(
                            b"('hg rename --after' to record the rename)\n"
                        )
                    else:
                        hint = _(b"('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True  # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(
                        _(b'%s: not recording move - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                else:
                    ui.warn(
                        _(b'%s: not recording copy - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                return True  # report a failure
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or b'.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name so the
                    # rename works on case-insensitive filesystems
                    tmp = target + b"~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(
                        _(b'%s: cannot copy - %s\n')
                        % (relsrc, encoding.strtolocal(inst.strerror))
                    )
                    return True  # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(
            ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
        )
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            ctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist under
                    # dest; the higher-scoring strip length wins
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(
                        dest, os.path.basename(util.localpath(p))
                    )
                else:
                    res = lambda p: dest
        return res

    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.InputError(
                _(
                    b'with multiple sources, destination must be an '
                    b'existing directory'
                )
            )
        if util.endswithsep(dest):
            raise error.InputError(
                _(b'destination %s is not a directory') % dest
            )

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        hint = None
        if rename:
            hint = _(b'maybe you meant to use --after --at-rev=.')
        raise error.InputError(_(b'no files to copy'), hint=hint)

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0
1896 1895
1897 1896
## facility to let extensions process additional data into an import patch
# lists of identifiers, executed in list order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mappings from identifier to the actual import function
#
# 'preimport' functions are run before the commit is made and are provided
# the following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided
# the following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1918 1917
1919 1918
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple: a status message (or None),
    the new changeset node (or None), and whether hunks were rejected in
    ``--partial`` mode.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    # command-line options override values parsed from the patch header
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    # no patch file at all: nothing to do
    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        parents.append(repo[nullrev])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.InputError(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullrev]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullrev]
        except error.RepoError:
            # recorded parents unknown locally: fall back to wdir parents
            p1, p2 = parents
        if p2.rev() == nullrev:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        # apply the patch to the working directory
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchParseError as e:
            raise error.InputError(
                pycompat.bytestr(e),
                hint=_(
                    b'check that whitespace in the patch has not been mangled'
                ),
            )
        except error.PatchApplicationError as e:
            if not partial:
                raise error.StateError(pycompat.bytestr(e))
            if partial:
                # with --partial, failed hunks are saved as .rej files and
                # the commit proceeds with whatever applied
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            extra = {}
            # let extensions contribute to the changeset's extra dict
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: build the commit in memory without touching the wdir
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchParseError as e:
                raise error.InputError(
                    stringutil.forcebytestr(e),
                    hint=_(
                        b'check that whitespace in the patch has not been mangled'
                    ),
                )
            except error.PatchApplicationError as e:
                raise error.StateError(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
2135 2134
2136 2135
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
2144 2143
2145 2144
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit a single changeset as an "HG changeset patch" through ``fm``.

    ``seqno`` is the 1-based position of this patch in the series (exposed to
    ``extraexport`` hooks); ``switch_parent`` diffs against the second parent
    of a merge instead of the first.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against p2 instead of p1
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = repo.nullid

    # patch header
    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    # patch body
    if fm.isplain():
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
2189 2188
2190 2189
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Write the patches for ``revs``, in order, to a single destination.

    ``dest`` is echoed to the ui only when it names a real file; pseudo
    destinations (e.g. b'<unnamed>') start with b'<' and are kept quiet.
    """
    seqno = 0
    for rev in revs:
        seqno += 1
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(
            repo, repo[rev], fm, match, switch_parent, seqno, diffopts
        )
2199 2198
2200 2199
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files.

    Each revision's output file name is computed by rendering ``fntemplate``;
    revisions that map to the same file name are appended to that file in
    series order.
    """
    total = len(revs)
    # default=0 makes an empty revision list a no-op instead of letting
    # max() raise ValueError on an empty sequence
    revwidth = max((len(str(rev)) for rev in revs), default=0)
    filemap = util.sortdict()  # filename: [(seqno, rev), ...]

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        dest = makefilename(
            ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                ctx = repo[rev]
                _exportsingle(
                    repo, ctx, fm, match, switch_parent, seqno, diffopts
                )
2225 2224
2226 2225
def _prefetchchangedfiles(repo, revs, match):
    """Warm the file store for every file touched by ``revs``.

    When ``match`` is given, only files it accepts are prefetched.
    """
    allfiles = {
        fname
        for rev in revs
        for fname in repo[rev].files()
        if not match or match(fname)
    }
    match = scmutil.matchfiles(repo, allfiles)
    revmatches = [(rev, match) for rev in revs]
    scmutil.prefetchfiles(repo, revmatches)
2236 2235
2237 2236
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
        Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    """
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
2279 2278
2280 2279
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream."""
    _prefetchchangedfiles(repo, revs, match)

    dest = getattr(fp, 'name', b'<unnamed>')
    fm = formatter.formatter(repo.ui, fp, b'export', {})
    with fm:
        _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
2288 2287
2289 2288
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    # successors may be empty (prune marker): condwrite skips the field then
    successors = marker.succnodes()
    fm.condwrite(
        successors,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, successors), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parentnodes), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above; drop it from the metadata dict
    metadata = marker.metadata().copy()
    metadata.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, metadata)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2320 2319
2321 2320
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    matching = repo.revs(b'date(%s)', date)
    try:
        # max() raises ValueError when the revset is empty
        rev = matching.max()
    except ValueError:
        raise error.InputError(_(b"revision matching date not found"))

    displaydate = dateutil.datestr(repo[rev].date())
    ui.status(_(b"found revision %d from %s\n") % (rev, displaydate))
    return b'%d' % rev
2335 2334
2336 2335
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule matching unknown files for addition, recursing into subrepos.

    Returns the list of files that could not be added (as reported by the
    match's bad-file callback, rejected by ``wctx.add``, or failed in a
    subrepo).
    """
    # files reported as bad by the matcher's callback
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # non-exact matches are only added when untracked and present on disk
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    # recurse into subrepositories (explicit-only unless --subrepos)
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get('subrepos'):
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2395 2394
2396 2395
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` under ``serverpath`` in ``webconf``.

    Also recurses into every subrepository ever recorded in ``.hgsub``.
    """
    webconf[serverpath] = repo.root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))

    revs = repo.revs(b'filelog("path:.hgsub")')
    for rev in revs:
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2405 2404
2406 2405
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Unschedule matching tracked files, recursing into subrepositories.

    Returns a ``(bad, forgot)`` pair: files that could not be forgotten and
    files that were actually forgotten.
    """
    if dryrun and interactive:
        raise error.InputError(
            _(b"cannot specify both --dry-run and --interactive")
        )
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # candidates: everything tracked that the matcher selects
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        # prompt per file; mutates `forget` in place
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2506 2505
2507 2506
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """List files of ``ctx`` matching ``m`` through formatter ``fm``.

    Returns 0 when at least one file was listed (here or in a subrepo),
    1 otherwise.
    """
    ret = 1

    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    if fm.isplain() and not needsfctx:
        # Fast path: skip the formatter entirely and batch ui.write calls.
        pending = []
        for path in ctx.matches(m):
            ret = 0
            pending.append(fmt % uipathfn(path))
            if len(pending) > 100:
                ui.write(b''.join(pending))
                del pending[:]
        if pending:
            ui.write(b''.join(pending))
    else:
        for path in ctx.matches(m):
            ret = 0
            fm.startitem()
            fm.context(ctx=ctx)
            if needsfctx:
                fctx = ctx[path]
                fm.write(
                    b'size flags', b'% 10d % 1s ', fctx.size(), fctx.flags()
                )
            fm.data(path=path)
            fm.plain(fmt % uipathfn(path))

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if not (subrepos or m.exact(subpath) or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        try:
            recurse = m.exact(subpath) or subrepos
            listed = sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
            if listed == 0:
                ret = 0
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return ret
2554 2553
2555 2554
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Schedule matching files for removal, recursing into subrepositories.

    ``after`` only records files already deleted from disk; ``force`` also
    removes modified/added files.  Returns 1 when any file was skipped or
    warned about, 0 otherwise.  When ``warnings`` is supplied by a caller,
    messages are accumulated there instead of being printed here.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # a caller (e.g. a recursive subrepo call) owns printing the warnings
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        # NOTE: defined per iteration so the closure captures the current f
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # select which tracked files actually get removed
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2705 2704
2706 2705
2707 2706 def _catfmtneedsdata(fm):
2708 2707 return not fm.datahint() or b'data' in fm.datahint()
2709 2708
2710 2709
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first."""

    # Fetching data() can be expensive (e.g. lfs), so skip it entirely when
    # the formatter did not request the b'data' field.
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            data = ctx.repo().wwritedata(path, data)
    else:
        data = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', data)
    fm.data(path=path)
2728 2727
2729 2728
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Print (or write to per-file destinations) matching files of ``ctx``.

    Returns 0 when at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # emit a single file, either to the base formatter or to a file
        # named by rendering fntemplate
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
                write(file)
                return 0
        except KeyError:
            # file not in this manifest: fall through to the generic walk
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    # recurse into subrepositories
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2790 2789
2791 2790
2791 class _AddRemoveContext:
2792 """a small (hacky) context to deal with lazy opening of context
2793
2794 This is to be used in the `commit` function right below. This deals with
2795 lazily open a `changing_files` context inside a `transaction` that span the
2796 full commit operation.
2797
2798 We need :
2799 - a `changing_files` context to wrap the dirstate change within the
2800 "addremove" operation,
2801 - a transaction to make sure these change are not written right after the
2802 addremove, but when the commit operation succeed.
2803
2804 However it get complicated because:
2805 - opening a transaction "this early" shuffle hooks order, especially the
2806 `precommit` one happening after the `pretxtopen` one which I am not too
2807 enthusiastic about.
2808 - the `mq` extensions + the `record` extension stacks many layers of call
2809 to implement `qrefresh --interactive` and this result with `mq` calling a
2810 `strip` in the middle of this function. Which prevent the existence of
2811 transaction wrapping all of its function code. (however, `qrefresh` never
2812 call the `addremove` bits.
2813 - the largefile extensions (and maybe other extensions?) wraps `addremove`
2814 so slicing `addremove` in smaller bits is a complex endeavour.
2815
2816 So I eventually took a this shortcut that open the transaction if we
2817 actually needs it, not disturbing much of the rest of the code.
2818
2819 It will result in some hooks order change for `hg commit --addremove`,
2820 however it seems a corner case enough to ignore that for now (hopefully).
2821
2822 Notes that None of the above problems seems insurmountable, however I have
2823 been fighting with this specific piece of code for a couple of day already
2824 and I need a solution to keep moving forward on the bigger work around
2825 `changing_files` context that is being introduced at the same time as this
2826 hack.
2827
2828 Each problem seems to have a solution:
2829 - the hook order issue could be solved by refactoring the many-layer stack
2830 that currently composes a commit and calling them earlier,
    - the mq issue could be solved by refactoring `mq` so that the final
      strip is done after transaction closure. Be warned, however, that the
      mq code is quite ancient.
2834 - large-file could be reworked in parallel of the `addremove` to be
2835 friendlier to this.
2836
    However, each of these tasks is too much of a diversion right now. In
    addition, they will be much easier to undertake once the `changing_files`
    dust has settled."""
2840
2841 def __init__(self, repo):
2842 self._repo = repo
2843 self._transaction = None
2844 self._dirstate_context = None
2845 self._state = None
2846
2847 def __enter__(self):
2848 assert self._state is None
2849 self._state = True
2850 return self
2851
2852 def open_transaction(self):
2853 """open a `transaction` and `changing_files` context
2854
2855 Call this when you know that change to the dirstate will be needed and
2856 we need to open the transaction early
2857
2858 This will also open the dirstate `changing_files` context, so you should
2859 call `close_dirstate_context` when the distate changes are done.
2860 """
2861 assert self._state is not None
2862 if self._transaction is None:
2863 self._transaction = self._repo.transaction(b'commit')
2864 self._transaction.__enter__()
2865 if self._dirstate_context is None:
2866 self._dirstate_context = self._repo.dirstate.changing_files(
2867 self._repo
2868 )
2869 self._dirstate_context.__enter__()
2870
2871 def close_dirstate_context(self):
2872 """close the change_files if any
2873
2874 Call this after the (potential) `open_transaction` call to close the
2875 (potential) changing_files context.
2876 """
2877 if self._dirstate_context is not None:
2878 self._dirstate_context.__exit__(None, None, None)
2879 self._dirstate_context = None
2880
2881 def __exit__(self, *args):
2882 if self._dirstate_context is not None:
2883 self._dirstate_context.__exit__(*args)
2884 if self._transaction is not None:
2885 self._transaction.__exit__(*args)
2886
2887
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    When --addremove is given, unknown/missing files are marked
    added/removed inside an `_AddRemoveContext`, which scopes the dirstate
    change to a `changing_files` context and (lazily) a transaction, so the
    addremove result is only written out if the commit succeeds.
    '''
    date = opts.get(b'date')
    if date:
        opts[b'date'] = dateutil.parsedate(date)

    with repo.wlock(), repo.lock():
        message = logmessage(ui, opts)
        matcher = scmutil.match(repo[None], pats, opts)

        with _AddRemoveContext(repo) as c:
            # extract addremove carefully -- this function can be called from a
            # command that doesn't support addremove
            if opts.get(b'addremove'):
                relative = scmutil.anypats(pats, opts)
                uipathfn = scmutil.getuipathfn(
                    repo,
                    legacyrelativevalue=relative,
                )
                r = scmutil.addremove(
                    repo,
                    matcher,
                    b"",
                    uipathfn,
                    opts,
                    open_tr=c.open_transaction,
                )
                m = _(b"failed to mark all new/missing files as added/removed")
                if r != 0:
                    raise error.Abort(m)
            # the dirstate scope must be closed before the actual commit runs
            c.close_dirstate_context()
            return commitfunc(ui, repo, message, matcher, opts)
2827 2920
2828 2921
def samefile(f, ctx1, ctx2):
    """Return True if file `f` is identical (content and flags) in both contexts.

    A file absent from both manifests counts as "the same"; a file present
    in only one of them does not.
    """
    in_first = f in ctx1.manifest()
    in_second = f in ctx2.manifest()
    if not in_first:
        # only "the same" if the other side misses it too
        return not in_second
    if not in_second:
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2839 2932
2840 2933
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset `old` in place, folding in matching working-copy changes.

    Returns the node of the amended changeset, or `old.node()` when nothing
    would effectively change.  Raises `error.Abort` when --addremove fails.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolve_commit_options(ui, opts)
        opts = pycompat.byteskwargs(opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        filestoamend = {f for f in wctx.files() if matcher(f)}

        changes = len(filestoamend) > 0
        changeset_copies = (
            repo.ui.config(b'experimental', b'copies.read-from')
            != b'filelog-only'
        )
        # If there are changes to amend or if copy information needs to be read
        # from the changeset extras, we cannot take the fast path of using
        # filectxs from the old commit.
        if changes or changeset_copies:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx)
            if old.p2():
                copied.update(copies.pathcopies(old.p2(), wctx))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should use the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path in filestoamend:
                        # Return None for removed files.
                        if path in wctx.removed():
                            return None
                        fctx = wctx[path]
                    else:
                        fctx = old.filectx(path)
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        elif opts.get(b'draft'):
            commitphase = phases.draft
        newid = repo.commitctx(new)
        ms.reset()

        with repo.dirstate.changing_parents(repo):
            # Reroute the working copy parent to the new changeset
            repo.setparents(newid, repo.nullid)

            # Fixing the dirstate because localrepo.commitctx does not update
            # it. This is rather convenient because we did not need to update
            # the dirstate for all the files in the new commit which commitctx
            # could have done if it updated the dirstate. Now, we can
            # selectively update the dirstate only for the amended files.
            dirstate = repo.dirstate

            # Update the state of the files which were added and modified in
            # the amend to "normal" in the dirstate. We need to use
            # "normallookup" since the files may have changed since the
            # command started; using "normal" would mark them as clean but
            # with uncommitted contents.
            normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
            for f in normalfiles:
                dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )

            # Update the state of files which were removed in the amend
            # to "removed" in the dirstate.
            removedfiles = set(wctx.removed()) & filestoamend
            for f in removedfiles:
                dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

    return newid
3082 3175
3083 3176
def commiteditor(repo, ctx, subs, editform=b''):
    """Return the commit message for `ctx`, invoking the editor when empty."""
    existing = ctx.description()
    if existing:
        return existing
    return commitforceeditor(
        repo, ctx, subs, editform=editform, unchangedmessagedetection=True
    )
3090 3183
3091 3184
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Run the user's editor to obtain a commit message for `ctx`.

    The editor is seeded with the most specific configured
    `committemplate` matching `editform`, or the default commit text.
    Raises `error.InputError` when the resulting message is empty, or —
    with `unchangedmessagedetection` — when it was left unchanged.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # look for a template from the most specific editform
    # (e.g. 'changeset.commit.amend') down to plain 'changeset'
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # drop all remaining HG: helper lines
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.InputError(_(b"empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.InputError(_(b"commit message unchanged"))

    return text
3155 3248
3156 3249
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the `committemplate` section template `ref` for `ctx`."""
    ui = repo.ui
    spec = formatter.reference_templatespec(ref)
    t = logcmdutil.changesettemplater(ui, repo, spec)
    overrides = (
        (k, templater.unquotestring(v))
        for k, v in repo.ui.configitems(b'committemplate')
    )
    t.t.cache.update(overrides)

    if not extramsg:
        extramsg = b''  # template expects a bytes value, never None

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3172 3265
3173 3266
def hgprefix(msg):
    """Prefix every non-empty line of `msg` with "HG: " and rejoin them."""
    prefixed = [b"HG: %s" % line for line in msg.split(b"\n") if line]
    return b"\n".join(prefixed)
3176 3269
3177 3270
def buildcommittext(repo, ctx, subs, extramsg):
    """Assemble the default editor text for committing `ctx`."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append(b"")
    lines.append(b"")  # Empty line between message and comments.
    lines.append(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    lines.append(hgprefix(extramsg))
    lines.append(b"HG: --")
    lines.append(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_(b"branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_(b"subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_(b"added %s") % f) for f in added)
    lines.extend(hgprefix(_(b"changed %s") % f) for f in modified)
    lines.extend(hgprefix(_(b"removed %s") % f) for f in removed)
    if not added and not modified and not removed:
        lines.append(hgprefix(_(b"no files changed")))
    lines.append(b"")

    return b"\n".join(lines)
3211 3304
3212 3305
def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
    """Print status messages for the freshly created commit `node`.

    Warns when the commit already existed (changelog `tip` unchanged),
    reports "created new head" when appropriate, notes reopened closed
    branch heads, and echoes the committed changeset in verbose/debug mode.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if tip is not None and repo.changelog.tip() == tip:
        # avoid reporting something like "committed new head" when
        # recommitting old changesets, and issue a helpful warning
        # for most instances
        repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
    elif (
        not opts.get(b'amend')
        and bheads
        and node not in bheads
        and not any(
            p.node() in bheads and p.branch() == branch for p in parents
        )
    ):
        repo.ui.status(_(b'created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get(b'close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % r.rev()
                )

    if repo.ui.debugflag:
        repo.ui.write(
            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        )
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3275 3368
3276 3369
def postcommitstatus(repo, pats, opts):
    """Return the working-copy status restricted to files matching `pats`."""
    m = scmutil.match(repo[None], pats, opts)
    return repo.status(match=m)
3279 3372
3280 3373
def revert(ui, repo, ctx, *pats, **opts):
    """Restore files matched by `pats` to their state in changeset `ctx`.

    For every matched file, classifies how it differs between the working
    copy, the working-copy parent(s), and the target revision, then
    dispatches each file to the appropriate action (revert / add / remove /
    drop / forget / undelete), backing up locally modified versions unless
    --no-backup was given.  Matched sub-repositories are reverted
    recursively.  The actual filesystem work is delegated to
    `_performrevert`.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get(b'interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # silence "no such file" noise for paths already collected,
                # subrepos, or directories covered by collected files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + b'/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn(b"%s: %s\n" % (uipathfn(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(
                node1=node, match=m, unknown=True, ignored=True, clean=True
            )
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded  # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != repo.nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if (
                src
                and src not in names
                and repo.dirstate.get_entry(src).removed
            ):
                dsremoved.add(src)
                names[src] = True

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if not repo.dirstate.get_entry(abs).added:
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate.get_entry(abs).added:
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {
            b'revert': ([], _(b'reverting %s\n')),
            b'add': ([], _(b'adding %s\n')),
            b'remove': ([], _(b'removing %s\n')),
            b'drop': ([], _(b'removing %s\n')),
            b'forget': ([], _(b'forgetting %s\n')),
            b'undelete': ([], _(b'undeleting %s\n')),
            b'noop': (None, _(b'no changes needed to %s\n')),
            b'unknown': (None, _(b'file not managed: %s\n')),
        }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3  # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0  # never do backup
        if opts.get(b'no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions[b'remove']
        if not opts.get(b'no_backup'):
            backupanddel = actions[b'drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions[b'revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions[b'revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions[b'revert'], dsmodifiedbackup),
            # Added since target
            (added, actions[b'remove'], discard),
            # Added in working directory
            (dsadded, actions[b'forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions[b'drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions[b'add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions[b'add'], check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved, actions[b'undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions[b'undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions[b'noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions[b'unknown'], discard),
        )

        for abs, exact in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
                            absbakname = scmutil.backuppath(ui, repo, abs)
                            bakname = os.path.relpath(
                                absbakname, start=repo.root
                            )
                            ui.note(
                                _(b'saving current version of %s as %s\n')
                                % (uipathfn(abs), uipathfn(bakname))
                            )
                            if not opts.get(b'dry_run'):
                                if interactive:
                                    util.copyfile(target, absbakname)
                                else:
                                    util.rename(target, absbakname)
                    if opts.get(b'dry_run'):
                        if ui.verbose or not exact:
                            ui.status(msg % uipathfn(abs))
                elif exact:
                    ui.warn(msg % uipathfn(abs))
                break

        if not opts.get(b'dry_run'):
            needdata = (b'revert', b'add', b'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles(
                repo, [f for sublist in oplist for f in sublist]
            )
            prefetch(
                repo,
                [(ctx.rev(), matchfiles)],
            )
            match = scmutil.match(repo[None], pats)
            _performrevert(
                repo,
                ctx,
                names,
                uipathfn,
                actions,
                match,
                interactive,
                tobackup,
            )

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(
                        ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
                    )
                except KeyError:
                    raise error.Abort(
                        b"subrepository '%s' does not exist in %s!"
                        % (sub, short(ctx.node()))
                    )
3595 3688
3596 3689
def _performrevert(
    repo,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    ``actions`` maps an action name (b'forget', b'remove', b'drop',
    b'revert', b'add', b'undelete') to a ``(filelist, message)`` pair; the
    message template is used for status output.  ``names`` maps each file to
    whether it was named exactly on the command line.  In interactive mode
    the user is prompted per file (and per hunk for reverts).
    """
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()
    # Files the user declined to touch in interactive mode; they must not be
    # reverted later in this call either.
    excluded_files = []

    def checkout(f):
        # Write f's content from the target context into the working copy.
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # Unlink f from the working copy (best effort) and untrack it.
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            pass
        repo.dirstate.set_untracked(f)

    def prntstatusmsg(action, f):
        # Report the action unless the file was named exactly and we are not
        # verbose (exactly-named files are only reported when interesting).
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    # Phase 1: forget files that were added (and should no longer be tracked).
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.set_untracked(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.set_untracked(f)
    # Phase 2: remove files from disk and from tracking.
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    # Phase 3: drop files from tracking only (keep them on disk).
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.set_untracked(f)

    # We are reverting to our parent. If possible, we had like `hg status`
    # to report the file as clean. We have to be less agressive for
    # merges to avoid losing information about copy introduced by the merge.
    # This might comes with bugs ?
    reset_copy = p2 == repo.nullid

    def normal(filename):
        return repo.dirstate.set_tracked(filename, reset_copy=reset_copy)

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        # When reverting against the working copy parent we either "discard"
        # hunks (default) or "keep" them; against another revision we "apply"
        # the diff that brings files back to that revision.
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        original_headers = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, original_headers, match, operation=operation
            )
            if operation == b'discard':
                # The user selected hunks to discard; the patch we apply is
                # the reverse of those hunks.
                chunks = patch.reversehunks(chunks)

        except error.PatchParseError as err:
            raise error.InputError(_(b'error parsing patch: %s') % err)
        except error.PatchApplicationError as err:
            raise error.StateError(_(b'error applying patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(chunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchParseError as err:
                raise error.InputError(pycompat.bytestr(err))
            except error.PatchApplicationError as err:
                raise error.StateError(pycompat.bytestr(err))
        del fp
    else:
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            # NOTE(review): `normal` is a local function and therefore always
            # truthy; this guard looks vestigial — confirm before removing.
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f in newlyaddedandmodifiedfiles:
            continue

        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add new file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice != 0:
                continue
        prntstatusmsg(b'add', f)
        checkout(f)
        repo.dirstate.set_tracked(f)

    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    # Restore copy records so reverted adds/undeletes keep their copy source.
    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3801 3894
3802 3895
# Extension hook points: other modules append callbacks to these `util.hooks`
# instances and the corresponding commands invoke them.

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3820 3913
3821 3914
def checkunfinished(repo, commit=False, skipmerge=False):
    """Abort when a multistep operation (graft, rebase, ...) is unfinished.

    It's probably good to check this right before bailifchanged().  States
    that allow a commit are skipped when ``commit`` is True; the in-progress
    merge state is skipped when ``skipmerge`` is True.
    """

    def _abort_on(state):
        if state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())

    # Check non-clearable states first, so things like rebase take
    # precedence over update.
    for state in statemod._unfinishedstates:
        ignorable = (
            state._clearable
            or (commit and state._allowcommit)
            or state._reportonly
        )
        if not ignorable:
            _abort_on(state)

    for state in statemod._unfinishedstates:
        ignorable = (
            not state._clearable
            or (commit and state._allowcommit)
            or (state._opname == b'merge' and skipmerge)
            or state._reportonly
        )
        if not ignorable:
            _abort_on(state)
3849 3942
3850 3943
def clearunfinished(repo):
    """Check for unfinished operations (as above), and clear the ones
    that are clearable.
    """
    states = statemod._unfinishedstates
    # A non-clearable unfinished operation is a hard error.
    for state in states:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())

    # Drop the state files of clearable, non-merge operations.
    for state in states:
        if state._opname == b'merge' or state._reportonly:
            continue
        if state._clearable and state.isunfinished(repo):
            util.unlink(repo.vfs.join(state._fname))
3866 3959
3867 3960
def getunfinishedstate(repo):
    """Return the statecheck object of the first unfinished operation.

    Returns None when no multistep operation is in progress.
    """
    return next(
        (s for s in statemod._unfinishedstates if s.isunfinished(repo)),
        None,
    )
3875 3968
3876 3969
def howtocontinue(repo):
    """Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates is searched for an unfinished operation that
    supports a continue method; if one is found, the message to finish it is
    returned.  Failing that, a dirty working directory suggests 'hg commit'.

    Returns a (msg, warning) tuple: 'msg' is bytes or None, 'warning' is a
    boolean telling the caller whether the message deserves warning-level
    output.
    """
    contmsg = _(b"continue: %s")
    for state in statemod._unfinishedstates:
        if state._continueflag and state.isunfinished(repo):
            return contmsg % state.continuemsg(), True
    dirty = repo[None].dirty(missing=True, merge=False, branch=False)
    if dirty:
        return contmsg % _(b"hg commit"), False
    return None, None
3897 3990
3898 3991
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve

    If there's an unfinished operation that supports a continue flag,
    howtocontinue will make us report via repo.ui.warn; otherwise the
    message goes through repo.ui.note.
    """
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter(b"%s\n" % msg)
3913 4006
3914 4007
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.  When no warning-level
    continuation exists, the error carries no hint.
    """
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.StateError(_(b'no %s in progress') % task, hint=hint)
3929 4022
3930 4023
def abortgraft(ui, repo, graftstate):
    """abort the interrupted graft and rollbacks to the state before interrupted
    graft

    Strips the changesets created by the interrupted graft when it is safe
    to do so (none turned public, no new descendants grew on top of them),
    then updates the working copy back to where the graft started.  Returns
    0 on success; raises StateError/Abort when there is nothing to abort or
    the state file predates the 'newnodes' record.
    """
    if not graftstate.exists():
        raise error.StateError(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # and old graft state which does not have all the data required to abort
        # the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

        if cleanup:
            with repo.wlock(), repo.lock():
                mergemod.clean_update(startctx)
                # stripping the new nodes created
                strippoints = [
                    c.node() for c in repo.set(b"roots(%ld)", newnodes)
                ]
                repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
3993 4086
3994 4087
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """read the graft state file and return a dict of the data stored in it"""
    try:
        return graftstate.read()
    except error.CorruptedState:
        # Fall back to the legacy format: one node hash per line.
        return {b'nodes': repo.vfs.read(b'graftstate').splitlines()}
4003 4096
4004 4097
def hgabortgraft(ui, repo):
    """abort logic for aborting graft using 'hg abort'"""
    with repo.wlock():
        # Load the on-disk graft state and delegate the real work.
        state = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, state)
@@ -1,2313 +1,2315 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import binascii
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 short,
24 24 wdirrev,
25 25 )
26 26 from .pycompat import getattr
27 27 from .thirdparty import attr
28 28 from . import (
29 29 copies as copiesmod,
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 requirements as requirementsmod,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 hashutil,
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
54 54 if pycompat.iswindows:
55 55 from . import scmwindows as scmplatform
56 56 else:
57 57 from . import scmposix as scmplatform
58 58
59 59 parsers = policy.importmod('parsers')
60 60 rustrevlog = policy.importrust('revlog')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
@attr.s(slots=True, repr=False)
class status:
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        # Yield the seven categories in declaration order, so callers can
        # unpack a status object as a 7-tuple of file lists.
        for filelist in (
            self.modified,
            self.added,
            self.removed,
            self.deleted,
            self.unknown,
            self.ignored,
            self.clean,
        ):
            yield filelist

    def __repr__(self):
        template = (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        )
        rendered = tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
        return template % rendered
95 95
96 96
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context that should own it, preferring ctx1.
    # The subpaths from ctx2 are important when the .hgsub file has been
    # modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # Subrepos present only in ctx2 are handled separately below.
    missing = {s for s in ctx2.substate if s not in ctx1.substate}
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
121 121
122 122
def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    # Count live secret changesets among the excluded nodes, so we can tell
    # the user why "nothing" was exchanged.
    secretlist = []
    for n in excluded or []:
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_(b"no changes found\n"))
    else:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
141 141
142 142
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.

    Two exit codes are tracked: the legacy coarse code and the detailed code
    (see 'ui.detailed-exit-code'); which one is returned depends on that
    config knob.  -1 means "not set by any handler".
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            # Record the traceback (when --traceback is set) before the
            # outer handlers swallow the exception.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        if isinstance(inst, error.RepoLookupError):
            detailed_exit_code = 10
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        # The payload may be unicode, bytes, something else, or absent;
        # render each case differently.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Error as inst:
        # Generic Mercurial errors carry their own exit codes and message.
        if inst.detailed_exit_code is not None:
            detailed_exit_code = inst.detailed_exit_code
        if inst.coarse_exit_code is not None:
            coarse_exit_code = inst.coarse_exit_code
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, str):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        # Broken pipes (e.g. output piped to `head`) are silently ignored.
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code
267 267
268 268
def checknewlabel(repo, lbl, kind):
    """Validate a proposed label name, raising InputError when unusable."""
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in (b'tip', b'.', b'null'):
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for forbidden in (b':', b'\0', b'\n', b'\r'):
        if forbidden in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(forbidden)
            )
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # Python would also parse "1_0" as an integer (PEP 515 visual
        # separators), but we do not consider such a label an integer.
        if b'_' not in lbl:
            raise error.InputError(_(b"cannot use an integer as a name"))
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )
293 293
294 294
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # Newlines would corrupt line-oriented metadata such as the dirstate.
    if any(c in f for c in (b'\r', b'\n')):
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )
302 302
303 303
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    problem = util.checkwinfilename(f)
    if not problem:
        return
    problem = b"%s: %s" % (problem, procutil.shellquote(f))
    if abort:
        raise error.InputError(problem)
    ui.warn(_(b"warning: %s\n") % problem)
315 315
316 316
def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    raw = ui.config(b'ui', b'portablefilenames')
    lowered = raw.lower()
    parsed = stringutil.parsebool(raw)
    # Windows always aborts on non-portable names; elsewhere the config rules.
    abort = pycompat.iswindows or lowered == b'abort'
    warn = parsed or lowered == b'warn'
    unrecognized = parsed is None and not (warn or abort or lowered == b'ignore')
    if unrecognized:
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % raw
        )
    return abort, warn
330 330
331 331
class casecollisionauditor:
    """Detect new filenames that differ only in case from tracked files."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(joined).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        collision = folded in self._loweredfiles and f not in self._dirstate
        if collision:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.StateError(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
355 355
356 356
def filteredhash(repo, maxrev, needobsolete=False):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view (and, optionally,
    all obsolete revs) up to maxrev and returns that SHA-1 digest.
    """
    cl = repo.changelog
    if needobsolete:
        obsrevs = obsolete.getrevs(repo, b'obsolete')
        if not cl.filteredrevs and not obsrevs:
            return None
        # filteredrevs/obsrevs are hashable frozensets; their hashes make the
        # cache key sensitive to the view changing, not just maxrev.
        key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
    else:
        if not cl.filteredrevs:
            return None
        key = maxrev
        obsrevs = frozenset()

    # Memoized on the changelog; recomputed only on a cache miss.
    result = cl._filteredrevs_hashcache.get(key)
    if not result:
        revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            result = s.digest()
            cl._filteredrevs_hashcache[key] = result
    return result
391 391
392 392
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs

    When ``followsym`` is true, symlinked directories are followed;
    ``seen_dirs`` accumulates stat results of visited directories to avoid
    revisiting them through symlink loops.
    """

    def errhandler(err):
        # Only errors on the starting path itself are fatal; failures deeper
        # in the walk are silently skipped by os.walk.
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            # Returns True when dirname was not seen before (and records it).
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        # Without samestat we cannot detect loops, so do not follow symlinks.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Walk symlinked dirs via recursion; keep real dirs
                        # in the normal os.walk traversal.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
440 440
441 441
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is not None:
        return node
    # Working-directory contexts have no node; use the wdir sentinel id.
    return ctx.repo().nodeconstants.wdirid
448 448
449 449
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # The working directory has no revision number; map it to the wdir
    # sentinel so arithmetic still works.
    return wdirrev if rev is None else rev
457 457
458 458
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
464 464
465 465
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Debug mode shows the full hash; otherwise the short form suffices.
    hexfunc = hex if ui.debugflag else short
    return b'%d:%s' % (rev, hexfunc(node))
473 473
474 474
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly 'x'-prefixed) hex nodeid prefix to a binary node.

    Returns None when nothing matches; re-raises AmbiguousPrefixLookupError
    when the prefix is ambiguous and the disambiguation revset (if any)
    cannot narrow it to a single node.
    """
    # An explicit 'x' prefix marks "this is a hash, not a revnum"; strip it.
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # Only a unique match within the revset disambiguates.
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
505 505
506 506
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        value = int(prefix)
    except ValueError:
        # not numeric at all, so it can never be read as a revnum
        return False
    # A leading zero (other than the literal b'0' itself, which *is* a valid
    # revnum) never parses as a revision number.
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    # anything at or beyond the tip rev is not a valid revnum either
    return value < len(repo)
520 520
521 521
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # mark revnum-looking prefixes with a leading 'x' instead of
            # lengthening them; resolvehexnodeidprefix() strips it again
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # otherwise extend the prefix until it can no longer be mistaken
        # for a revision number
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapped need to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # no nodetree available: brute-force scan within the revset
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
594 594
595 595
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
607 607
608 608
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".

    Raises RepoLookupError (or a Filtered* subclass) when the symbol cannot
    be resolved; ProgrammingError when ``symbol`` is not bytes.
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        # fast path for the few special names
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        # next, try to interpret the symbol as a revision number
        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                # reject forms like b'010' or b'+5' that int() accepts
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # a full-length hex nodeid?
        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (binascii.Error, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, treat the symbol as a (possibly short) nodeid prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        # translate "hidden by filtering" into a user-oriented message
        raise _filterederror(repo, symbol)
674 674
675 675
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith(b'visible'):
        # generic message for non-visibility filters (e.g. 'served')
        msg = _(b"filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # The changeset is hidden from the 'visible' view; check whether it is
    # obsolete so the message can explain *why* it is hidden.
    ctx = revsymbol(repo.unfiltered(), changeid)
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _(b"hidden revision '%s'") % changeid

    hint = _(b'use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
700 700
701 701
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve a single revspec to a context, falling back to ``default``
    when no spec is given (0 is a valid spec)."""
    if not revspec and revspec != 0:
        return repo[default]

    resolved = revrange(repo, [revspec], localalias=localalias)
    if not resolved:
        raise error.InputError(_(b'empty revision set'))
    return repo[resolved.last()]
710 710
711 711
def _pairspec(revspec):
    """Return True if the parsed revspec is a top-level range expression."""
    range_ops = (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in range_ops
720 720
721 721
def revpair(repo, revs):
    """Resolve user-supplied revspecs into a (first, second) context pair."""
    if not revs:
        return repo[b'.'], repo[None]

    resolved = revrange(repo, revs)
    if not resolved:
        raise error.InputError(_(b'empty revision range'))

    first = resolved.first()
    second = resolved.last()

    if first == second and len(revs) >= 2:
        # a collapsed result from several specs may hide an empty side
        if not all(revrange(repo, [r]) for r in revs):
            raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
746 746
747 747
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are wrapped as literal revnum specs
    allspecs = [
        revsetlang.formatspec(b'%d', spec) if isinstance(spec, int) else spec
        for spec in specs
    ]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
775 775
776 776
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double until reaching sizelimit, then stay
    constant forever."""
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
782 782
783 783
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        # windows grow geometrically so small requests stay cheap while
        # long walks amortize the per-window overhead
        for windowsize in increasingwindows():
            nrevs = []
            for i in range(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            # prepare in ascending order first, then yield in the
            # caller's requested order
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
824 824
825 825
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    # a sole parent that immediately precedes the rev carries no information
    return [] if parents[0].rev() >= intrev(ctx) - 1 else parents
841 841
842 842
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        use_relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            use_relative = legacyrelativevalue
        else:
            use_relative = stringutil.parsebool(config)
            if use_relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if use_relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    return util.localpath
881 881
882 882
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def prefixed(f):
        return uipathfn(posixpath.join(subpath, f))

    return prefixed
886 886
887 887
def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
895 895
896 896
def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh."""
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # keep a non-matching pattern as-is so the matcher can warn
            expanded.append(kindpat)
    return expanded
915 915
916 916
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    if badfn is None:
        # default: warn on stderr about paths that match nothing
        def badfn(f, msg):
            ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats
948 948
949 949
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
955 955
956 956
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # `repo` is unused but kept for interface symmetry with the other
    # matcher factories in this module
    return matchmod.always()
960 960
961 961
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # `repo` is unused but kept for interface symmetry with matchall()
    return matchmod.exact(files, badfn=badfn)
965 965
966 966
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    Raises ParseError (with ``msg``) unless the pattern resolves to exactly
    one file in the given revision.
    """
    if not matchmod.patkind(pat):
        # a plain path needs no matching, only canonicalization
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    matched = [f for f in ctx if m(f)]
    if len(matched) != 1:
        raise error.ParseError(msg)
    return matched[0]
980 980
981 981
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if origbackuppath:
        return vfs.vfs(repo.wvfs.join(origbackuppath))
    return None
990 990
991 991
def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # a directory at the exact backup path would shadow the file backup
    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
1026 1026
1027 1027
1028 1028 class _containsnode:
1029 1029 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1030 1030
1031 1031 def __init__(self, repo, revcontainer):
1032 1032 self._torev = repo.changelog.rev
1033 1033 self._revcontains = revcontainer.__contains__
1034 1034
1035 1035 def __contains__(self, node):
1036 1036 return self._revcontains(self._torev(node))
1037 1037
1038 1038
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        # a bare iterable of nodes means "replaced with nothing"
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        # process ancestors before descendants so parent phases are final
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportarchived(repo) and mayusearchived:
            # this assume we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )
1220 1220
1221 1221
def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
    """Add new files, forget missing ones, and record renames, per ``matcher``.

    ``open_tr``, when provided, is a callable that opens the transaction; it
    is invoked lazily, only right before the dirstate is actually modified,
    so dry runs and no-op invocations never open a transaction.

    Returns 1 if any explicitly-requested file was rejected or a subrepo
    addremove failed, else 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.InputError(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.InputError(_(b'similarity must be between 0 and 100'))
    # rename detection works with a 0.0-1.0 ratio internally
    similarity /= 100.0

    ret = 0

    # recurse into subrepos first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    # report what will be added/removed
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run and (unknown or forgotten or deleted or renames):
        # only open the transaction when there is something to change
        if open_tr is not None:
            open_tr()
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1289 1291
1290 1292
def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        newly_tracked = set(unknown + forgotten)
        for abs in sorted(newly_tracked | set(deleted)):
            if abs in newly_tracked:
                repo.ui.status(_(b'adding %s\n') % abs)
            else:
                repo.ui.status(_(b'removing %s\n') % abs)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
1324 1326
1325 1327
def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed, forgotten).
    """
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in walkresults.items():
        # st is truthy when the file exists on disk
        entry = dirstate.get_entry(abs)
        if (not entry.any_tracked) and audit_path.check(abs):
            unknown.append(abs)
        elif (not entry.removed) and not st:
            # tracked but gone from disk
            deleted.append(abs)
        elif entry.removed and st:
            # marked removed but reappeared on disk
            forgotten.append(abs)
        # for finding renames
        elif entry.removed and not st:
            removed.append(abs)
        elif entry.added:
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1360 1362
1361 1363
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        exact_pair = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not exact_pair:
            repo.ui.status(
                _(
                    b'recording removal of %s as rename to %s '
                    b'(%d%% similar)\n'
                )
                % (uipathfn(old), uipathfn(new), score * 100)
            )
        renames[new] = old
    return renames
1383 1385
1384 1386
def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dest, src in renames.items():
            workingctx.copy(src, dest)
1394 1396
1395 1397
def getrenamedfn(repo, endrev=None):
    """Return a function ``getrenamed(fn, rev)`` that reports the copy source
    of file ``fn`` at revision ``rev`` (or None).

    Uses the changeset-centric copy data when the repo is configured for it;
    otherwise falls back to filelog-based lookup with a per-file cache.
    """
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            # first query for this file: scan its filelog once and cache
            # the rename info keyed by linkrev
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1440 1442
1441 1443
def getcopiesfn(repo, endrev=None):
    """Return a callable reporting the copies a changectx introduces.

    The returned function takes a changectx and returns a sorted list of
    (destination, source) pairs.
    """
    if not copiesmod.usechangesetcentricalgo(repo):
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            pairs = []
            rev = ctx.rev()
            for fn in ctx.files():
                source = getrenamed(fn, rev)
                if source:
                    pairs.append((fn, source))
            return pairs

    else:

        def copiesfn(ctx):
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
            if not p2copies:
                return sorted(p1copies.items())
            # merge both parents' copies; they should not overlap
            allcopies = p1copies.copy()
            allcopies.update(p2copies)
            return sorted(allcopies.items())

    return copiesfn
1466 1468
1467 1469
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # Follow an existing copy record: if src is itself a copy, point the new
    # record at the original source instead.
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        entry = repo.dirstate.get_entry(dst)
        if (entry.added or not entry.tracked) and not dryrun:
            repo.dirstate.set_tracked(dst)
    else:
        if repo.dirstate.get_entry(origsrc).added and origsrc == src:
            # The source is only just added (no committed data to point the
            # copy record at), so warn and merely add the destination.
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if not repo.dirstate.get_entry(dst).tracked and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1491 1493
1492 1494
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    # snapshot working-dir copies before reparenting; merged back in below
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)

    for f in s.modified:
        ds.update_file_p1(f, p1_tracked=True)

    for f in s.added:
        ds.update_file_p1(f, p1_tracked=False)

    for f in s.removed:
        ds.update_file_p1(f, p1_tracked=True)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # chain copies through their sources where known
    copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()}
    # Adjust the dirstate copies
    for dst, src in copies.items():
        if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()
1525 1527
1526 1528
def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT not in requirements:
        # share-safe disabled: everything lives in .hg/requires
        return requirements, None
    wc = {
        req
        for req in requirements
        if req in requirementsmod.WORKING_DIR_REQUIREMENTS
    }
    store = set(requirements) - wc
    return wc, store
1544 1546
1545 1547
def istreemanifest(repo):
    """Return whether the repository uses tree manifests."""
    requirements = repo.requirements
    return requirementsmod.TREEMANIFEST_REQUIREMENT in requirements
1549 1551
1550 1552
def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements

    When `requirements` is given, it replaces `repo.requirements` before
    being written out.
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')
1568 1570
1569 1571
def writerequires(opener, requirements):
    """Atomically write `requirements`, sorted, one per line."""
    content = b''.join(b"%s\n" % req for req in sorted(requirements))
    with opener(b'requires', b'w', atomictemp=True) as fp:
        fp.write(content)
1574 1576
1575 1577
class filecachesubentry:
    """Tracks one file path and its cached stat, for change detection."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat only if the file is (or may be) cacheable
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True when the file looks modified since the last stat."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) when the file does not exist
        try:
            return util.cachestat(path)
        except FileNotFoundError:
            pass
1629 1631
1630 1632
class filecacheentry:
    """Aggregates several filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        # `stat` controls whether each sub-entry stats its file immediately
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1647 1649
1648 1650
class filecache:
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def tracked_paths(self, obj):
        # resolve each declared path relative to the decorated object
        return [self.join(obj, path) for path in self.paths]

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator entry point: remember the wrapped function and its name
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = self.tracked_paths(obj)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # cache on the instance so later lookups bypass this descriptor
        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = self.tracked_paths(obj)
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
1736 1738
1737 1739
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        # always drain/close the subprocess and the source stream
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
1804 1806
1805 1807
class progress:
    """Helper driving a ui progress bar for one topic.

    Usable as a context manager; the bar is completed on exit. When the
    'progress.debug' config is set, every update is also echoed via
    ui.debug().
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        # convenience wrapper: advance the position by `step`
        self.update(self.pos + step, item, total)

    def complete(self):
        # a None position signals the bar is finished
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b' ' + self.unit if self.unit else b''
        item = b' ' + item if item else b''

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1855 1857
1856 1858
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    return ui.configbool(b'format', b'usegeneraldelta')
1863 1865
1864 1866
def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimized

    The `format.generaldelta` config is an old form of the config that also
    implies that incoming delta-bases should be never be trusted. This function
    exists for this purpose.
    """
    # experimental config: format.generaldelta
    generaldelta = ui.configbool(b'format', b'generaldelta')
    return generaldelta
1874 1876
1875 1877
class simplekeyvaluefile:
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    # reserved key used to carry the raw first line when requested
    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            # validate each pair before writing anything
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
1946 1948
1947 1949
# transaction-name prefixes after which obsoleted changesets should be
# summarized (matched via startswith in registersummarycallback)
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

# transaction-name prefixes after which newly added changesets should be
# summarized
_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1960 1962
1961 1963
def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        # normalize a possibly-None matcher for the prefetch hooks
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            # None means "match all files"
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)
1985 1987
1986 1988
# a list of (repo, revs, match) prefetch functions; extensions register
# callbacks here and prefetchfiles() invokes them
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1992 1994
1993 1995
def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead
    """

    def txmatch(sources):
        # does the transaction name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        # category names are numbered so callbacks run in registration order
        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            # snapshot taken now, compared against post-transaction counts
            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))
2163 2165
2164 2166
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    if delta <= 0:
        return None
    return _(b'%i new %s changesets\n') % (delta, instability)
2172 2174
2173 2175
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of `nodes`, abbreviating long lists.

    Up to `maxnumnodes` short hashes are shown; further nodes are collapsed
    into an "and N others" suffix unless the ui is verbose.
    """
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return b' '.join(short(node) for node in nodes)
    shown = b' '.join(short(node) for node in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (shown, len(nodes) - maxnumnodes)
2179 2181
2180 2182
def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to affected branch
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) <= 1:
            continue
        msg = _(b'rejecting multiple heads on branch "%s"') % name
        hint = _(b'%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
2197 2199
2198 2200
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.

    The default implementation returns `sink` unchanged.
    """
    return sink
2204 2206
2205 2207
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not specs:
        return repo

    # direct access must be explicitly enabled
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    # collect hash-like symbols from every spec that parses
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these cache when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)
2257 2259
2258 2260
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        # first, try interpreting the symbol as a plain revision number
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        # in the unfiltered repo but not the filtered one:
                        # it is a hidden revision
                        revs.add(n)
                        continue
        except ValueError:
            pass

        # otherwise, treat the symbol as a (possibly abbreviated) node hash
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
2292 2294
2293 2295
def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    # the revset expression itself is built by format_bookmark_revspec()
    return repo.revs(format_bookmark_revspec(mark))
2300 2302
2301 2303
def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark

    The bookmark name is matched literally (the b'literal:' prefix disables
    pattern matching)."""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
@@ -1,537 +1,533 b''
1 1 #require repofncache
2 2
3 3 An extension which will set fncache chunksize to 1 byte to make sure that logic
4 4 does not break
5 5
6 6 $ cat > chunksize.py <<EOF
7 7 > from mercurial import store
8 8 > store.fncache_chunksize = 1
9 9 > EOF
10 10
11 11 $ cat >> $HGRCPATH <<EOF
12 12 > [extensions]
13 13 > chunksize = $TESTTMP/chunksize.py
14 14 > EOF
15 15
16 16 Init repo1:
17 17
18 18 $ hg init repo1
19 19 $ cd repo1
20 20 $ echo "some text" > a
21 21 $ hg add
22 22 adding a
23 23 $ hg ci -m first
24 24 $ cat .hg/store/fncache | sort
25 25 data/a.i
26 26
27 27 Testing a.i/b:
28 28
29 29 $ mkdir a.i
30 30 $ echo "some other text" > a.i/b
31 31 $ hg add
32 32 adding a.i/b
33 33 $ hg ci -m second
34 34 $ cat .hg/store/fncache | sort
35 35 data/a.i
36 36 data/a.i.hg/b.i
37 37
38 38 Testing a.i.hg/c:
39 39
40 40 $ mkdir a.i.hg
41 41 $ echo "yet another text" > a.i.hg/c
42 42 $ hg add
43 43 adding a.i.hg/c
44 44 $ hg ci -m third
45 45 $ cat .hg/store/fncache | sort
46 46 data/a.i
47 47 data/a.i.hg.hg/c.i
48 48 data/a.i.hg/b.i
49 49
50 50 Testing verify:
51 51
52 52 $ hg verify -q
53 53
54 54 $ rm .hg/store/fncache
55 55
56 56 $ hg verify
57 57 checking changesets
58 58 checking manifests
59 59 crosschecking files in changesets and manifests
60 60 checking files
61 61 warning: revlog 'data/a.i' not in fncache!
62 62 warning: revlog 'data/a.i.hg/c.i' not in fncache!
63 63 warning: revlog 'data/a.i/b.i' not in fncache!
64 64 checking dirstate
65 65 checked 3 changesets with 3 changes to 3 files
66 66 3 warnings encountered!
67 67 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
68 68
69 69 Follow the hint to make sure it works
70 70
71 71 $ hg debugrebuildfncache
72 72 adding data/a.i
73 73 adding data/a.i.hg/c.i
74 74 adding data/a.i/b.i
75 75 3 items added, 0 removed from fncache
76 76
77 77 $ hg verify -q
78 78
79 79 $ cd ..
80 80
81 81 Non store repo:
82 82
83 83 $ hg --config format.usestore=False init foo
84 84 $ cd foo
85 85 $ mkdir tst.d
86 86 $ echo foo > tst.d/foo
87 87 $ hg ci -Amfoo
88 88 adding tst.d/foo
89 89 $ find .hg | sort
90 90 .hg
91 91 .hg/00changelog.i
92 92 .hg/00manifest.i
93 93 .hg/cache
94 94 .hg/cache/branch2-served
95 95 .hg/cache/rbc-names-v1
96 96 .hg/cache/rbc-revs-v1
97 97 .hg/data
98 98 .hg/data/tst.d.hg
99 99 .hg/data/tst.d.hg/foo.i
100 100 .hg/dirstate
101 101 .hg/fsmonitor.state (fsmonitor !)
102 102 .hg/last-message.txt
103 103 .hg/phaseroots
104 104 .hg/requires
105 105 .hg/undo
106 .hg/undo.backup.dirstate
107 106 .hg/undo.backupfiles
108 107 .hg/undo.bookmarks
109 108 .hg/undo.branch
110 109 .hg/undo.desc
111 .hg/undo.dirstate
112 110 .hg/undo.phaseroots
113 111 .hg/wcache
114 112 .hg/wcache/checkisexec (execbit !)
115 113 .hg/wcache/checklink (symlink !)
116 114 .hg/wcache/checklink-target (symlink !)
117 115 .hg/wcache/manifestfulltextcache (reporevlogstore !)
118 116 $ cd ..
119 117
120 118 Non fncache repo:
121 119
122 120 $ hg --config format.usefncache=False init bar
123 121 $ cd bar
124 122 $ mkdir tst.d
125 123 $ echo foo > tst.d/Foo
126 124 $ hg ci -Amfoo
127 125 adding tst.d/Foo
128 126 $ find .hg | sort
129 127 .hg
130 128 .hg/00changelog.i
131 129 .hg/cache
132 130 .hg/cache/branch2-served
133 131 .hg/cache/rbc-names-v1
134 132 .hg/cache/rbc-revs-v1
135 133 .hg/dirstate
136 134 .hg/fsmonitor.state (fsmonitor !)
137 135 .hg/last-message.txt
138 136 .hg/requires
139 137 .hg/store
140 138 .hg/store/00changelog.i
141 139 .hg/store/00manifest.i
142 140 .hg/store/data
143 141 .hg/store/data/tst.d.hg
144 142 .hg/store/data/tst.d.hg/_foo.i
145 143 .hg/store/phaseroots
146 144 .hg/store/requires
147 145 .hg/store/undo
148 146 .hg/store/undo.backupfiles
149 147 .hg/store/undo.phaseroots
150 .hg/undo.backup.dirstate
151 148 .hg/undo.bookmarks
152 149 .hg/undo.branch
153 150 .hg/undo.desc
154 .hg/undo.dirstate
155 151 .hg/wcache
156 152 .hg/wcache/checkisexec (execbit !)
157 153 .hg/wcache/checklink (symlink !)
158 154 .hg/wcache/checklink-target (symlink !)
159 155 .hg/wcache/manifestfulltextcache (reporevlogstore !)
160 156 $ cd ..
161 157
162 158 Encoding of reserved / long paths in the store
163 159
164 160 $ hg init r2
165 161 $ cd r2
166 162 $ cat <<EOF > .hg/hgrc
167 163 > [ui]
168 164 > portablefilenames = ignore
169 165 > EOF
170 166
171 167 $ hg import -q --bypass - <<EOF
172 168 > # HG changeset patch
173 169 > # User test
174 170 > # Date 0 0
175 171 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
176 172 > # Parent 0000000000000000000000000000000000000000
177 173 > 1
178 174 >
179 175 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
180 176 > new file mode 100644
181 177 > --- /dev/null
182 178 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
183 179 > @@ -0,0 +1,1 @@
184 180 > +foo
185 181 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
186 182 > new file mode 100644
187 183 > --- /dev/null
188 184 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
189 185 > @@ -0,0 +1,1 @@
190 186 > +foo
191 187 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
192 188 > new file mode 100644
193 189 > --- /dev/null
194 190 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
195 191 > @@ -0,0 +1,1 @@
196 192 > +foo
197 193 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
198 194 > new file mode 100644
199 195 > --- /dev/null
200 196 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
201 197 > @@ -0,0 +1,1 @@
202 198 > +foo
203 199 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
204 200 > new file mode 100644
205 201 > --- /dev/null
206 202 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
207 203 > @@ -0,0 +1,1 @@
208 204 > +foo
209 205 > EOF
210 206
211 207 $ find .hg/store -name *.i | sort
212 208 .hg/store/00changelog.i
213 209 .hg/store/00manifest.i
214 210 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
215 211 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
216 212 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
217 213 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
218 214 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
219 215
220 216 $ cd ..
221 217
222 218 Aborting lock does not prevent fncache writes
223 219
224 220 $ cat > exceptionext.py <<EOF
225 221 > import os
226 222 > from mercurial import commands, error, extensions
227 223 >
228 224 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
229 225 > def releasewrap():
230 226 > l.held = False # ensure __del__ is a noop
231 227 > raise error.Abort(b"forced lock failure")
232 228 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
233 229 > return l
234 230 >
235 231 > def reposetup(ui, repo):
236 232 > extensions.wrapfunction(repo, '_lock', lockexception)
237 233 >
238 234 > cmdtable = {}
239 235 >
240 236 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
241 237 > # at the end of dispatching (for intentional "forced lcok failure")
242 238 > def commitwrap(orig, ui, repo, *pats, **opts):
243 239 > repo = repo.unfiltered() # to use replaced repo._lock certainly
244 240 > wlock = repo.wlock()
245 241 > try:
246 242 > return orig(ui, repo, *pats, **opts)
247 243 > finally:
248 244 > # multiple 'relase()' is needed for complete releasing wlock,
249 245 > # because "forced" abort at last releasing store lock
250 246 > # prevents wlock from being released at same 'lockmod.release()'
251 247 > for i in range(wlock.held):
252 248 > wlock.release()
253 249 >
254 250 > def extsetup(ui):
255 251 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
256 252 > EOF
257 253 $ extpath=`pwd`/exceptionext.py
258 254 $ hg init fncachetxn
259 255 $ cd fncachetxn
260 256 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
261 257 $ touch y
262 258 $ hg ci -qAm y
263 259 abort: forced lock failure
264 260 [255]
265 261 $ cat .hg/store/fncache
266 262 data/y.i
267 263
268 264 Aborting transaction prevents fncache change
269 265
270 266 $ cat > ../exceptionext.py <<EOF
271 267 > import os
272 268 > from mercurial import commands, error, extensions, localrepo
273 269 >
274 270 > def wrapper(orig, self, *args, **kwargs):
275 271 > tr = orig(self, *args, **kwargs)
276 272 > def fail(tr):
277 273 > raise error.Abort(b"forced transaction failure")
278 274 > # zzz prefix to ensure it sorted after store.write
279 275 > tr.addfinalize(b'zzz-forcefails', fail)
280 276 > return tr
281 277 >
282 278 > def uisetup(ui):
283 279 > extensions.wrapfunction(
284 280 > localrepo.localrepository, b'transaction', wrapper)
285 281 >
286 282 > cmdtable = {}
287 283 >
288 284 > EOF
289 285
290 286 Clean cached version
291 287 $ rm -f "${extpath}c"
292 288 $ rm -Rf "`dirname $extpath`/__pycache__"
293 289
294 290 $ touch z
295 291 $ hg ci -qAm z
296 292 transaction abort!
297 293 rollback completed
298 294 abort: forced transaction failure
299 295 [255]
300 296 $ cat .hg/store/fncache
301 297 data/y.i
302 298
303 299 Aborted transactions can be recovered later
304 300
305 301 $ cat > ../exceptionext.py <<EOF
306 302 > import os
307 303 > import signal
308 304 > from mercurial import (
309 305 > commands,
310 306 > error,
311 307 > extensions,
312 308 > localrepo,
313 309 > transaction,
314 310 > )
315 311 >
316 312 > def trwrapper(orig, self, *args, **kwargs):
317 313 > tr = orig(self, *args, **kwargs)
318 314 > def fail(tr):
319 315 > os.kill(os.getpid(), signal.SIGKILL)
320 316 > # zzz prefix to ensure it sorted after store.write
321 317 > tr.addfinalize(b'zzz-forcefails', fail)
322 318 > return tr
323 319 >
324 320 > def uisetup(ui):
325 321 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
326 322 > trwrapper)
327 323 >
328 324 > cmdtable = {}
329 325 >
330 326 > EOF
331 327
332 328 Clean cached versions
333 329 $ rm -f "${extpath}c"
334 330 $ rm -Rf "`dirname $extpath`/__pycache__"
335 331
336 332 $ hg up -q 1
337 333 $ touch z
338 334 # Cannot rely on the return code value as chg use a different one.
339 335 # So we use a `|| echo` trick
340 336 # XXX-CHG fixing chg behavior would be nice here.
341 337 $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
342 338 Killed (?)
343 339 He's Dead, Jim.
344 340 $ cat .hg/store/fncache | sort
345 341 data/y.i
346 342 data/z.i
347 343 $ hg recover --verify
348 344 rolling back interrupted transaction
349 345 checking changesets
350 346 checking manifests
351 347 crosschecking files in changesets and manifests
352 348 checking files
353 349 checking dirstate
354 350 checked 1 changesets with 1 changes to 1 files
355 351 $ cat .hg/store/fncache
356 352 data/y.i
357 353
358 354 $ cd ..
359 355
360 356 debugrebuildfncache does nothing unless repo has fncache requirement
361 357
362 358 $ hg --config format.usefncache=false init nofncache
363 359 $ cd nofncache
364 360 $ hg debugrebuildfncache
365 361 (not rebuilding fncache because repository does not support fncache)
366 362
367 363 $ cd ..
368 364
369 365 debugrebuildfncache works on empty repository
370 366
371 367 $ hg init empty
372 368 $ cd empty
373 369 $ hg debugrebuildfncache
374 370 fncache already up to date
375 371 $ cd ..
376 372
377 373 debugrebuildfncache on an up to date repository no-ops
378 374
379 375 $ hg init repo
380 376 $ cd repo
381 377 $ echo initial > foo
382 378 $ echo initial > .bar
383 379 $ hg commit -A -m initial
384 380 adding .bar
385 381 adding foo
386 382
387 383 $ cat .hg/store/fncache | sort
388 384 data/.bar.i
389 385 data/foo.i
390 386
391 387 $ hg debugrebuildfncache
392 388 fncache already up to date
393 389
394 390 debugrebuildfncache restores deleted fncache file
395 391
396 392 $ rm -f .hg/store/fncache
397 393 $ hg debugrebuildfncache
398 394 adding data/.bar.i
399 395 adding data/foo.i
400 396 2 items added, 0 removed from fncache
401 397
402 398 $ cat .hg/store/fncache | sort
403 399 data/.bar.i
404 400 data/foo.i
405 401
406 402 Rebuild after rebuild should no-op
407 403
408 404 $ hg debugrebuildfncache
409 405 fncache already up to date
410 406
411 407 A single missing file should get restored, an extra file should be removed
412 408
413 409 $ cat > .hg/store/fncache << EOF
414 410 > data/foo.i
415 411 > data/bad-entry.i
416 412 > EOF
417 413
418 414 $ hg debugrebuildfncache
419 415 removing data/bad-entry.i
420 416 adding data/.bar.i
421 417 1 items added, 1 removed from fncache
422 418
423 419 $ cat .hg/store/fncache | sort
424 420 data/.bar.i
425 421 data/foo.i
426 422
427 423 debugrebuildfncache recovers from truncated line in fncache
428 424
429 425 $ printf a > .hg/store/fncache
430 426 $ hg debugrebuildfncache
431 427 fncache does not ends with a newline
432 428 adding data/.bar.i
433 429 adding data/foo.i
434 430 2 items added, 0 removed from fncache
435 431
436 432 $ cat .hg/store/fncache | sort
437 433 data/.bar.i
438 434 data/foo.i
439 435
440 436 $ cd ..
441 437
442 438 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
443 439
444 440 $ hg --config format.dotencode=false init nodotencode
445 441 $ cd nodotencode
446 442 $ echo initial > foo
447 443 $ echo initial > .bar
448 444 $ hg commit -A -m initial
449 445 adding .bar
450 446 adding foo
451 447
452 448 $ cat .hg/store/fncache | sort
453 449 data/.bar.i
454 450 data/foo.i
455 451
456 452 $ rm .hg/store/fncache
457 453 $ hg debugrebuildfncache
458 454 adding data/.bar.i
459 455 adding data/foo.i
460 456 2 items added, 0 removed from fncache
461 457
462 458 $ cat .hg/store/fncache | sort
463 459 data/.bar.i
464 460 data/foo.i
465 461
466 462 $ cd ..
467 463
468 464 In repositories that have accumulated a large number of files over time, the
469 465 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
470 466 The cache should not loaded when committing changes to existing files, or when unbundling
471 467 changesets that only contain changes to existing files:
472 468
473 469 $ cat > fncacheloadwarn.py << EOF
474 470 > from mercurial import extensions, localrepo
475 471 >
476 472 > def extsetup(ui):
477 473 > def wrapstore(orig, requirements, *args):
478 474 > store = orig(requirements, *args)
479 475 > if b'store' in requirements and b'fncache' in requirements:
480 476 > instrumentfncachestore(store, ui)
481 477 > return store
482 478 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
483 479 >
484 480 > def instrumentfncachestore(fncachestore, ui):
485 481 > class instrumentedfncache(type(fncachestore.fncache)):
486 482 > def _load(self):
487 483 > ui.warn(b'fncache load triggered!\n')
488 484 > super(instrumentedfncache, self)._load()
489 485 > fncachestore.fncache.__class__ = instrumentedfncache
490 486 > EOF
491 487
492 488 $ fncachextpath=`pwd`/fncacheloadwarn.py
493 489 $ hg init nofncacheload
494 490 $ cd nofncacheload
495 491 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
496 492
497 493 A new file should trigger a load, as we'd want to update the fncache set in that case:
498 494
499 495 $ touch foo
500 496 $ hg ci -qAm foo
501 497 fncache load triggered!
502 498
503 499 But modifying that file should not:
504 500
505 501 $ echo bar >> foo
506 502 $ hg ci -qm foo
507 503
508 504 If a transaction has been aborted, the zero-size truncated index file will
509 505 not prevent the fncache from being loaded; rather than actually abort
510 506 a transaction, we simulate the situation by creating a zero-size index file:
511 507
512 508 $ touch .hg/store/data/bar.i
513 509 $ touch bar
514 510 $ hg ci -qAm bar
515 511 fncache load triggered!
516 512
517 513 Unbundling should follow the same rules; existing files should not cause a load:
518 514
519 515 (loading during the clone is expected)
520 516 $ hg clone -q . tobundle
521 517 fncache load triggered!
522 518 fncache load triggered!
523 519
524 520 $ echo 'new line' > tobundle/bar
525 521 $ hg -R tobundle ci -qm bar
526 522 $ hg -R tobundle bundle -q barupdated.hg
527 523 $ hg unbundle -q barupdated.hg
528 524
529 525 but adding new files should:
530 526
531 527 $ touch tobundle/newfile
532 528 $ hg -R tobundle ci -qAm newfile
533 529 $ hg -R tobundle bundle -q newfile.hg
534 530 $ hg unbundle -q newfile.hg
535 531 fncache load triggered!
536 532
537 533 $ cd ..
@@ -1,184 +1,182 b''
1 1 #require unix-permissions
2 2
3 3 test that new files created in .hg inherit the permissions from .hg/store
4 4
5 5 $ mkdir dir
6 6
7 7 just in case somebody has a strange $TMPDIR
8 8
9 9 $ chmod g-s dir
10 10 $ cd dir
11 11
12 12 $ cat >printmodes.py <<EOF
13 13 > import os
14 14 > import sys
15 15 >
16 16 > allnames = []
17 17 > isdir = {}
18 18 > for root, dirs, files in os.walk(sys.argv[1]):
19 19 > for d in dirs:
20 20 > name = os.path.join(root, d)
21 21 > isdir[name] = 1
22 22 > allnames.append(name)
23 23 > for f in files:
24 24 > name = os.path.join(root, f)
25 25 > allnames.append(name)
26 26 > allnames.sort()
27 27 > for name in allnames:
28 28 > suffix = name in isdir and '/' or ''
29 29 > print('%05o %s%s' % (os.lstat(name).st_mode & 0o7777, name, suffix))
30 30 > EOF
31 31
32 32 $ cat >mode.py <<EOF
33 33 > import os
34 34 > import sys
35 35 > print('%05o' % os.lstat(sys.argv[1]).st_mode)
36 36 > EOF
37 37
38 38 $ umask 077
39 39
40 40 $ hg init repo
41 41 $ cd repo
42 42
43 43 $ chmod 0770 .hg/store .hg/cache .hg/wcache
44 44
45 45 before commit
46 46 store can be written by the group, other files cannot
47 47 store is setgid
48 48
49 49 $ "$PYTHON" ../printmodes.py .
50 50 00700 ./.hg/
51 51 00600 ./.hg/00changelog.i
52 52 00770 ./.hg/cache/
53 53 00600 ./.hg/requires
54 54 00770 ./.hg/store/
55 55 00600 ./.hg/store/requires
56 56 00770 ./.hg/wcache/
57 57
58 58 $ mkdir dir
59 59 $ touch foo dir/bar
60 60 $ hg ci -qAm 'add files'
61 61
62 62 after commit
63 63 working dir files can only be written by the owner
64 64 files created in .hg can be written by the group
65 65 (in particular, store/**, dirstate, branch cache file, undo files)
66 66 new directories are setgid
67 67
68 68 $ "$PYTHON" ../printmodes.py .
69 69 00700 ./.hg/
70 70 00600 ./.hg/00changelog.i
71 71 00770 ./.hg/cache/
72 72 00660 ./.hg/cache/branch2-served
73 73 00660 ./.hg/cache/rbc-names-v1
74 74 00660 ./.hg/cache/rbc-revs-v1
75 75 00660 ./.hg/dirstate
76 76 00660 ./.hg/fsmonitor.state (fsmonitor !)
77 77 00660 ./.hg/last-message.txt
78 78 00600 ./.hg/requires
79 79 00770 ./.hg/store/
80 80 00660 ./.hg/store/00changelog.i
81 81 00660 ./.hg/store/00manifest.i
82 82 00770 ./.hg/store/data/
83 83 00770 ./.hg/store/data/dir/
84 84 00660 ./.hg/store/data/dir/bar.i (reporevlogstore !)
85 85 00660 ./.hg/store/data/foo.i (reporevlogstore !)
86 86 00770 ./.hg/store/data/dir/bar/ (reposimplestore !)
87 87 00660 ./.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
88 88 00660 ./.hg/store/data/dir/bar/index (reposimplestore !)
89 89 00770 ./.hg/store/data/foo/ (reposimplestore !)
90 90 00660 ./.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
91 91 00660 ./.hg/store/data/foo/index (reposimplestore !)
92 92 00660 ./.hg/store/fncache (repofncache !)
93 93 00660 ./.hg/store/phaseroots
94 94 00600 ./.hg/store/requires
95 95 00660 ./.hg/store/undo
96 96 00660 ./.hg/store/undo.backupfiles
97 97 00660 ./.hg/store/undo.phaseroots
98 00660 ./.hg/undo.backup.dirstate
99 98 00660 ./.hg/undo.bookmarks
100 99 00660 ./.hg/undo.branch
101 100 00660 ./.hg/undo.desc
102 00660 ./.hg/undo.dirstate
103 101 00770 ./.hg/wcache/
104 102 00711 ./.hg/wcache/checkisexec
105 103 007.. ./.hg/wcache/checklink (re)
106 104 00600 ./.hg/wcache/checklink-target
107 105 00660 ./.hg/wcache/manifestfulltextcache (reporevlogstore !)
108 106 00700 ./dir/
109 107 00600 ./dir/bar
110 108 00600 ./foo
111 109
112 110 $ umask 007
113 111 $ hg init ../push
114 112
115 113 before push
116 114 group can write everything
117 115
118 116 $ "$PYTHON" ../printmodes.py ../push
119 117 00770 ../push/.hg/
120 118 00660 ../push/.hg/00changelog.i
121 119 00770 ../push/.hg/cache/
122 120 00660 ../push/.hg/requires
123 121 00770 ../push/.hg/store/
124 122 00660 ../push/.hg/store/requires
125 123 00770 ../push/.hg/wcache/
126 124
127 125 $ umask 077
128 126 $ hg -q push ../push
129 127
130 128 after push
131 129 group can still write everything
132 130
133 131 $ "$PYTHON" ../printmodes.py ../push
134 132 00770 ../push/.hg/
135 133 00660 ../push/.hg/00changelog.i
136 134 00770 ../push/.hg/cache/
137 135 00660 ../push/.hg/cache/branch2-base
138 136 00660 ../push/.hg/cache/rbc-names-v1
139 137 00660 ../push/.hg/cache/rbc-revs-v1
140 138 00660 ../push/.hg/requires
141 139 00770 ../push/.hg/store/
142 140 00660 ../push/.hg/store/00changelog.i
143 141 00660 ../push/.hg/store/00manifest.i
144 142 00770 ../push/.hg/store/data/
145 143 00770 ../push/.hg/store/data/dir/
146 144 00660 ../push/.hg/store/data/dir/bar.i (reporevlogstore !)
147 145 00660 ../push/.hg/store/data/foo.i (reporevlogstore !)
148 146 00770 ../push/.hg/store/data/dir/bar/ (reposimplestore !)
149 147 00660 ../push/.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
150 148 00660 ../push/.hg/store/data/dir/bar/index (reposimplestore !)
151 149 00770 ../push/.hg/store/data/foo/ (reposimplestore !)
152 150 00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
153 151 00660 ../push/.hg/store/data/foo/index (reposimplestore !)
154 152 00660 ../push/.hg/store/fncache (repofncache !)
155 153 00660 ../push/.hg/store/requires
156 154 00660 ../push/.hg/store/undo
157 155 00660 ../push/.hg/store/undo.backupfiles
158 156 00660 ../push/.hg/store/undo.phaseroots
159 157 00660 ../push/.hg/undo.bookmarks
160 158 00660 ../push/.hg/undo.branch
161 159 00660 ../push/.hg/undo.desc
162 160 00770 ../push/.hg/wcache/
163 161
164 162
165 163 Test that we don't lose the setgid bit when we call chmod.
166 164 Not all systems support setgid directories (e.g. HFS+), so
167 165 just check that directories have the same mode.
168 166
169 167 $ cd ..
170 168 $ hg init setgid
171 169 $ cd setgid
172 170 $ chmod g+rwx .hg/store
173 171 $ chmod g+s .hg/store 2> /dev/null || true
174 172 $ mkdir dir
175 173 $ touch dir/file
176 174 $ hg ci -qAm 'add dir/file'
177 175 $ storemode=`"$PYTHON" ../mode.py .hg/store`
178 176 $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
179 177 $ if [ "$storemode" != "$dirmode" ]; then
180 178 > echo "$storemode != $dirmode"
181 179 > fi
182 180 $ cd ..
183 181
184 182 $ cd .. # g-s dir
@@ -1,125 +1,125 b''
1 1 #testcases skip-detection fail-if-detected
2 2
3 3 Test situations that "should" only be reproducible:
4 4 - on networked filesystems, or
5 5 - user using `hg debuglocks` to eliminate the lock file, or
6 6 - something (that doesn't respect the lock file) writing to the .hg directory
7 7 while we're running
8 8
9 9
10 10 Initial setup
11 11 -------------
12 12
13 13 $ hg init base-repo
14 14 $ cd base-repo
15 15
16 16 $ cat > "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh" <<EOF
17 17 > [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
18 18 > f="\${WAITLOCK_FILE}"
19 19 > start=\`date +%s\`
20 20 > timeout=5
21 21 > "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" "\$timeout" "\$f"
22 22 > if [ \$# -gt 1 ]; then
23 23 > cat "\$@"
24 24 > fi
25 25 > EOF
26 26
27 27 Things behave differently if we don't already have a 00changelog.i file when
28 28 this all starts, so let's make one.
29 29
30 30 $ echo r0 > r0
31 31 $ hg commit -qAm 'r0'
32 32
33 33 $ cd ..
34 34 $ cp -R base-repo main-client
35 35 $ cp -R base-repo racing-client
36 36
37 37 $ mkdir sync
38 38 $ EDITOR_STARTED="$TESTTMP_FORWARD_SLASH/sync/.editor_started"
39 39 $ MISCHIEF_MANAGED="$TESTTMP_FORWARD_SLASH/sync/.mischief_managed"
40 40 $ JOBS_FINISHED="$TESTTMP_FORWARD_SLASH/sync/.jobs_finished"
41 41
42 42 Actual test
43 43 -----------
44 44
45 45 Start an hg commit that will take a while
46 46
47 47 $ cd main-client
48 48
49 49 #if fail-if-detected
50 50 $ cat >> $HGRCPATH << EOF
51 51 > [debug]
52 52 > revlog.verifyposition.changelog = fail
53 53 > EOF
54 54 #endif
55 55
56 56 $ echo foo > foo
57 57 $ (
58 58 > unset HGEDITOR;
59 59 > WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
60 60 > WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
61 61 > hg commit -qAm 'r1 (foo)' --edit foo \
62 62 > --config ui.editor="sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh" \
63 63 > > .foo_commit_out 2>&1 ;\
64 64 > touch "${JOBS_FINISHED}"
65 65 > ) &
66 66
67 67 Wait for the "editor" to actually start
68 68 $ sh "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" 5 "${EDITOR_STARTED}"
69 69
70 70
71 71 Do a concurrent edition
72 72 $ cd ../racing-client
73 73 $ touch ../pre-race
74 74 $ sleep 1
75 75 $ echo bar > bar
76 76 $ hg --repository ../racing-client commit -qAm 'r2 (bar)' bar
77 77 $ hg --repository ../racing-client debugrevlogindex -c
78 78 rev linkrev nodeid p1 p2
79 79 0 0 222799e2f90b 000000000000 000000000000
80 80 1 1 6f124f6007a0 222799e2f90b 000000000000
81 81
82 82 We simulate an network FS race by overwriting raced repo content with the new
83 83 content of the files changed in the racing repository
84 84
85 85 $ for x in `find . -type f -newer ../pre-race`; do
86 86 > cp $x ../main-client/$x
87 87 > done
88 88 $ cd ../main-client
89 89
90 90 Awaken the editor from that first commit
91 91 $ touch "${MISCHIEF_MANAGED}"
92 92 And wait for it to finish
93 93 $ WAITLOCK_FILE="${JOBS_FINISHED}" sh "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh"
94 94
95 95 #if skip-detection
96 96 (Ensure there was no output)
97 97 $ cat .foo_commit_out
98 98 And observe a corrupted repository -- rev 2's linkrev is 1, which should never
99 99 happen for the changelog (the linkrev should always refer to itself).
100 100 $ hg debugrevlogindex -c
101 101 rev linkrev nodeid p1 p2
102 102 0 0 222799e2f90b 000000000000 000000000000
103 103 1 1 6f124f6007a0 222799e2f90b 000000000000
104 104 2 1 ac80e6205bb2 222799e2f90b 000000000000
105 105 #endif
106 106
107 107 #if fail-if-detected
108 108 $ cat .foo_commit_out
109 note: commit message saved in .hg/last-message.txt
110 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
109 111 transaction abort!
110 112 rollback completed
111 note: commit message saved in .hg/last-message.txt
112 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
113 113 abort: 00changelog.i: file cursor at position 249, expected 121
114 114 And no corruption in the changelog.
115 115 $ hg debugrevlogindex -c
116 116 rev linkrev nodeid p1 p2
117 117 0 0 222799e2f90b 000000000000 000000000000
118 118 1 1 6f124f6007a0 222799e2f90b 000000000000 (missing-correct-output !)
119 119 And, because of transactions, there's none in the manifestlog either.
120 120 $ hg debugrevlogindex -m
121 121 rev linkrev nodeid p1 p2
122 122 0 0 7b7020262a56 000000000000 000000000000
123 123 1 1 ad3fe36d86d9 7b7020262a56 000000000000
124 124 #endif
125 125
General Comments 0
You need to be logged in to leave comments. Login now