##// END OF EJS Templates
cat: don't prefetch files unless the output requires it...
Matt Harbison -
r42678:561cd02c default
parent child Browse files
Show More
@@ -1,3398 +1,3408
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy as copymod
11 11 import errno
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 bookmarks,
25 25 changelog,
26 26 copies,
27 27 crecord as crecordmod,
28 28 dirstateguard,
29 29 encoding,
30 30 error,
31 31 formatter,
32 32 logcmdutil,
33 33 match as matchmod,
34 34 merge as mergemod,
35 35 mergeutil,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 pycompat,
41 41 revlog,
42 42 rewriteutil,
43 43 scmutil,
44 44 smartset,
45 45 subrepoutil,
46 46 templatekw,
47 47 templater,
48 48 util,
49 49 vfs as vfsmod,
50 50 )
51 51
52 52 from .utils import (
53 53 dateutil,
54 54 stringutil,
55 55 )
56 56
stringio = util.stringio

# templates of common command options
#
# Each entry is a flag tuple: (short name, long name, default value,
# help text[, value placeholder]) as consumed by the command tables.

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

confirmopts = [
    ('', 'confirm', None,
     _('ask before applying actions')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

formatteropts = [
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

# whitespace-related diff options, shared by diffopts2 below
diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool'), _('TOOL')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
175 175
def ishunk(x):
    """Return True if x is a hunk object from record or crecord."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
179 179
def newandmodified(chunks, originalchunks):
    """Return the set of file names from hunks that introduce new files
    and are not present in originalchunks (presumably modified while
    recording)."""
    return set([c.header.filename()
                for c in chunks
                if ishunk(c)
                and c.header.isnewfile()
                and c not in originalchunks])
187 187
def parsealiases(cmd):
    """Return the list of aliases encoded in a command table key,
    e.g. "log|history" -> ["log", "history"]."""
    return cmd.split("|")
190 190
def setupwrapcolorwrite(ui):
    """Monkey-patch ui.write so diff output passed through it gets
    labeled (and therefore colorized); return the original ui.write so
    the caller can restore it afterwards."""
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        # r'label': kwargs keys are native strings on py3
        label = kw.pop(r'label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrap)
    return oldwrite
203 203
def filterchunks(ui, originalhunks, usecurses, testfile, match,
                 operation=None):
    """Let the user interactively filter originalhunks.

    Uses the curses interface when usecurses is set (with a scripted
    chunk selector when testfile is given); if curses raises
    fallbackerror, warns and falls back to the plain-text filter.
    """
    try:
        if usecurses:
            if testfile:
                recordfn = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector)
            else:
                recordfn = crecordmod.chunkselector

            return crecordmod.filterpatch(ui, originalhunks, recordfn,
                                          operation)
    except crecordmod.fallbackerror as e:
        ui.warn('%s\n' % e.message)
        ui.warn(_('falling back to text mode\n'))

    # non-curses path, and the curses fallback path, end up here
    return patch.filterpatch(ui, originalhunks, match, operation)
221 221
def recordfilter(ui, originalhunks, match, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used to build ui messages to indicate the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    # temporarily wrap ui.write for colorized diff output
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, match, operation)
    finally:
        # restore ui.write even if the filtering aborts
        ui.write = oldwrite
    return newchunks, newopts
238 238
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select hunks and delegate the commit to commitfunc.

    cmdsuggest names a command to suggest when the ui is not interactive;
    backupall backs up every changed file instead of only the recorded
    ones; filterfn is the hunk-selection UI (e.g. recordfilter).
    pats/opts are forwarded to the underlying commit machinery.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match = matchmod.badmatch(match, fail)
            match.explicitdir = vdirs.append

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status
            tmpstatus = scmutil.status(copymod.copy(status[0]),
                                       copymod.copy(status[1]),
                                       copymod.copy(status[2]),
                                       copymod.copy(status[3]),
                                       copymod.copy(status[4]),
                                       copymod.copy(status[5]),
                                       copymod.copy(status[6]))

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True)
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True,
                                         section='commands',
                                         configprefix='commit.interactive.')
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks, match)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have files(); plain hunks do not
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # assemble the selected hunks into a single patch
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # side-effecting comprehension: remove new files from the wdir
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(), branchmerge=False,
                                force=True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best effort: leave stray backups behind rather than abort
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
444 444
class dirnode(object):
    """
    A node in the tree used to terse the status of a working copy.

    path is the path to this directory, without a trailing '/'

    statuses accumulates the status characters of every file anywhere
    under this directory (subdirectories included)

    files holds (filename, status) pairs for direct children only

    subdirs maps each child directory name to its own dirnode object
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record filename as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Register a file somewhere below this directory.

        When filename still contains a path separator, the file belongs
        to a subdirectory: recurse into (creating, if necessary) the
        corresponding dirnode. Otherwise record it here directly.
        """
        if '/' not in filename:
            self._addfileindir(filename, status)
        else:
            subdir, remainder = filename.split('/', 1)

            node = self.subdirs.get(subdir)
            if node is None:
                node = dirnode(pathutil.join(self.path, subdir))
                self.subdirs[subdir] = node

            node.addfile(remainder, status)

        # every file below us contributes its status to this node
        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, pathutil.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) pairs for this subtree, tersing when possible.

        terseargs is the string of status letters the user passed with
        the `--terse` flag.

        If every file below this directory shares a single status and
        that status letter occurs in terseargs, one entry for the whole
        directory is produced (its path ends in '/') and recursion
        stops. Otherwise the direct files are yielded one by one and
        each subdirectory is walked recursively.
        """
        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # terse only when this status letter was actually requested
            if onlyst in terseargs:
                yield onlyst, self.path + '/'
                return

        # direct children of this directory
        for entry in self.iterfilepaths():
            yield entry

        # then everything below the subdirectories
        for child in self.subdirs.values():
            for entry in child.tersewalk(terseargs):
                yield entry
544 544
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.

    Returns a list of file lists, one per status, in the order of allst.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # checking the argument validity
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_("'%s' not recognized") % s)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        # [0:1] slice (not [0]) keeps the one-character status code the
        # same string type as attrname (matters for bytes on py3)
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # we won't be tersing the root dir, so add files in it
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory and build tersedict
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    tersedlist = []
    for st in allst:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])

    return tersedlist
593 593
594 594 def _commentlines(raw):
595 595 '''Surround lineswith a comment char and a new line'''
596 596 lines = raw.splitlines()
597 597 commentedlines = ['# %s' % line for line in lines]
598 598 return '\n'.join(commentedlines) + '\n'
599 599
def _conflictsmsg(repo):
    """Return a commented message describing unresolved merge conflicts,
    or None when no merge state is active."""
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    # only report conflicts the current narrow/sparse match cares about
    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if unresolvedlist:
        mergeliststr = '\n'.join(
            [' %s' % util.pathto(repo.root, encoding.getcwd(), path)
             for path in sorted(unresolvedlist)])
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    else:
        msg = _('No unresolved merge conflicts.')

    return _commentlines(msg)
620 620
def _helpmessage(continuecmd, abortcmd):
    """Build the commented continue/abort hint shown for resumable states."""
    lines = _('To continue: %s\n'
              'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(lines)
625 625
# Per-operation hint builders used by the STATES table below; each returns
# the commented help text for one kind of unfinished operation.

def _rebasemsg():
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')

def _histeditmsg():
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')

def _unshelvemsg():
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')

def _graftmsg():
    return _helpmessage('hg graft --continue', 'hg graft --abort')

def _mergemsg():
    return _helpmessage('hg commit', 'hg merge --abort')

def _bisectmsg():
    # bisect has three possible next steps, so it doesn't use _helpmessage
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)
646 646
def fileexistspredicate(filename):
    """Return a predicate reporting whether repo.vfs contains filename."""
    def predicate(repo):
        return repo.vfs.exists(filename)
    return predicate
649 649
650 650 def _mergepredicate(repo):
651 651 return len(repo[None].parents()) > 1
652 652
# Unfinished-operation states scanned by _getrepostate(): each entry pairs
# a state name with its detection predicate and a hint-message builder.
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('shelvedstate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)
666 666
def _getrepostate(repo):
    """Return (state, predicate, msgfn) for the first unfinished operation
    detected in repo, or None when nothing is in progress."""
    # experimental config: commands.status.skipstates
    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
    candidates = (entry for entry in STATES if entry[0] not in skip)
    for state, statedetectionpredicate, msgfn in candidates:
        if statedetectionpredicate(repo):
            return (state, statedetectionpredicate, msgfn)
675 675
def morestatus(repo, fm):
    """Write extra commented status output to the formatter, describing
    any unfinished operation and its unresolved conflicts."""
    statetuple = _getrepostate(repo)
    if not statetuple:
        return

    state, statedetectionpredicate, helpfulmsg = statetuple
    label = 'status.morestatus'
    statemsg = _('The repository is in an unfinished *%s* state.') % state

    pieces = [_commentlines(statemsg)]
    conmsg = _conflictsmsg(repo)
    if conmsg:
        pieces.append(conmsg)
    if helpfulmsg:
        pieces.append(helpfulmsg())

    for piece in pieces:
        fm.plain('%s\n' % piece, label=label)
689 689
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        candidates = [cmd]
    else:
        candidates = table.keys()

    allcmds = []
    for entry in candidates:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)

        if cmd in aliases:
            found = cmd
        elif strict:
            found = None
        else:
            # prefix matching is allowed in non-strict mode
            found = next((a for a in aliases if a.startswith(cmd)), None)

        if found is None:
            continue
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
727 727
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # an exact name match wins over everything else
    if cmd in choice:
        return choice[cmd]

    matches = list(choice.values())
    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))
    if matches:
        return matches[0]

    raise error.UnknownCommand(cmd, allcmds)
743 743
def changebranch(ui, repo, revs, label):
    """ Change the branch name of given revs to label

    Rewrites each changeset in revs (a linear, topologically-headed,
    non-obsolete set) as a new commit carrying the branch 'label', then
    cleans up the old nodes and moves the working copy if needed.
    """

    with repo.wlock(), repo.lock(), repo.transaction('branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort("empty revision set")
        roots = repo.revs('roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(_("cannot change branch of non-linear revisions"))
        rewriteutil.precheck(repo, revs, 'change branch of')

        root = repo[roots.first()]
        # branches of the root's parents; reusing a parent's branch is fine
        rpb = {parent.branch() for parent in root.parents()}
        if label not in rpb and label in repo.branchmap():
            raise error.Abort(_("a branch of the same name already exists"))

        if repo.revs('obsolete() and %ld', revs):
            raise error.Abort(_("cannot change branch of a obsolete changeset"))

        # make sure only topological heads
        if repo.revs('heads(%ld) - head()', revs):
            raise error.Abort(_("cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context
        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    # file was removed in this revision
                    return None

            ui.debug("changing branch of '%s' from '%s' to '%s'\n"
                     % (hex(ctx.node()), oldbranch, label))
            extra = ctx.extra()
            extra['branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(repo, (p1, p2),
                                ctx.description(),
                                ctx.files(),
                                filectxfn,
                                user=ctx.user(),
                                date=ctx.date(),
                                extra=extra,
                                branch=label)

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug('new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg
                hg.update(repo, newid[0], quietempty=True)

        ui.status(_("changed branch on %d changesets\n") % len(replacements))
829 829
def findrepo(p):
    """Walk upward from p looking for a directory containing '.hg'.

    Returns the repository root, or None if the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # reached the top of the tree
            return None
        p = parent

    return p
837 837
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    # a second dirstate parent means an uncommitted merge is in progress
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # first four status lists: modified, added, removed, deleted
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise error.Abort(_('uncommitted changes'), hint=hint)
    ctx = repo[None]
    # subrepos must be clean too; each raises its own Abort
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)
855 855
def logmessage(ui, opts):
    """Extract the commit message from the -m/--message or -l/--logfile
    option; the two are mutually exclusive."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))

    if logfile and not message:
        try:
            if isstdiofilename(logfile):
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))

    return message
874 874
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1

    suffix = ".merge" if ismerge else ".normal"
    return baseformname + suffix
891 891
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """Return the commit message editor matching the '--edit' option.

    'finishdesc' is called with the edited commit message (the new
    changeset's description) after editing but before the emptiness
    check; its return value is the text actually stored in history.

    'extramsg' replaces the 'Leave message empty to abort commit' line
    in the editor; the 'HG: ' prefix and EOL are added automatically.

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    When 'finishdesc' or 'extramsg' is given (they are MQ-specific),
    the forcing editor is returned regardless of 'edit'.
    """
    if edit or finishdesc or extramsg:
        def editor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return editor
    if editform:
        def editor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return editor
    return commiteditor
922 922
def _escapecommandtemplate(tmpl):
    """Escape the outermost string parts of 'tmpl' so their backslashes
    survive template parsing; template fragments pass through unchanged."""
    chunks = []
    for typ, start, end in templater.scantemplate(tmpl, raw=True):
        piece = tmpl[start:end]
        if typ == b'string':
            piece = stringutil.escapestr(piece)
        chunks.append(piece)
    return b''.join(chunks)
931 931
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if tmpl:
        t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
        return t.renderdefault(props)
    return tmpl
949 949
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    t = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords,
        resources=formatter.templateresources(repo.ui, repo))
    mapping = {'ctx': ctx}
    mapping.update(props or {})
    return t.renderdefault(mapping)
964 964
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # mapping of %-specifier -> equivalent template fragment
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # some specifiers are only valid when the matching context was supplied
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # with both, zero-pad the sequence number to the width of 'total'
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            # template fragments (e.g. {tags % "{tag}"}) pass through as-is
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more %-specifiers; escape the remaining literal text
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # trailing lone '%' with no specifier character
                raise error.Abort(_("incomplete format spec in output "
                                    "filename"))
            c = pat[n + 1:n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(_("invalid format spec '%%%s' in output "
                                    "filename") % c)
    return ''.join(newname)
1032 1032
def makefilename(ctx, pat, **props):
    """Expand the old-style filename pattern 'pat' against changeset 'ctx'."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, _buildfntemplate(pat, **props),
                          pycompat.byteskwargs(props))
1041 1041
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == '-'
1045 1045
1046 1046 class _unclosablefile(object):
1047 1047 def __init__(self, fp):
1048 1048 self._fp = fp
1049 1049
1050 1050 def close(self):
1051 1051 pass
1052 1052
1053 1053 def __iter__(self):
1054 1054 return iter(self._fp)
1055 1055
1056 1056 def __getattr__(self, attr):
1057 1057 return getattr(self._fp, attr)
1058 1058
1059 1059 def __enter__(self):
1060 1060 return self
1061 1061
1062 1062 def __exit__(self, exc_type, exc_value, exc_tb):
1063 1063 pass
1064 1064
def makefileobj(ctx, pat, mode='wb', **props):
    """Open the file named by expanding 'pat'; '-' means stdin/stdout."""
    if not isstdiofilename(pat):
        return open(makefilename(ctx, pat, **props), mode)
    # stdio requested: hand back the (unclosable) standard streams
    ui = ctx.repo().ui
    if mode in ('r', 'rb'):
        fp = ui.fin
    else:
        fp = ui.fout
    return _unclosablefile(fp)
1077 1077
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    'opts' must carry 'changelog', 'manifest' and 'dir' keys; they are
    mutually exclusive and incompatible with an explicit 'file_', and all
    three require a repository.  Raises error.Abort on invalid
    combinations, or when the requested storage cannot be found.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            # per-directory manifest logs only exist with treemanifests
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            if not dir.endswith('/'):
                dir = dir + '/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, '_revlog'):
            r = r._revlog
        elif r is not None:
            raise error.Abort(_('%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_('cannot give path to non-revlog'))

        # fall back to opening the '.i' revlog file next to the given path
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
1138 1138
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    This is similar to ``openstorage()`` except it always returns a revlog.

    In most cases, a caller cares about the main storage object - not the
    revlog backing it. Therefore, this function should only be used by code
    that needs to examine low-level revlog implementation details. e.g. debug
    commands.
    """
    # thin wrapper: openstorage() does all the work, forced into revlog mode
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1150 1150
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, when 'rename' is True, move) files pats[:-1] to pats[-1].

    Returns True if any copy failed (callers use this as an error
    indicator), False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    def walkpat(pat):
        # expand one source pattern into a list of (abs, rel, exact) entries,
        # warning about (and skipping) unmanaged or removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = uipathfn(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy/rename a single file; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return True # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # source and target only differ in case (case-insensitive fs)
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return True # report a failure
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _("('hg rename %s' to replace the file by "
                                 'recording a rename)\n') % flags
                    else:
                        hint = _("('hg copy %s' to replace the file by "
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _("('hg rename --after' to record the rename)\n")
                    else:
                        hint = _("('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return True # report a failure
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name so the
                    # rename is visible on case-insensitive filesystems
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist at dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                                 os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0
1397 1397
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1418 1418
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node, rejects flag) tuple;
    (None, None, False) when the patch carried no data.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get('filename')
    message = patchdata.get('message')
    user = opts.get('user') or patchdata.get('user')
    date = opts.get('date') or patchdata.get('date')
    branch = patchdata.get('branch')
    nodeid = patchdata.get('nodeid')
    p1 = patchdata.get('p1')
    p2 = patchdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)

    if not tmpname:
        # nothing was extracted from the patch: nothing to apply
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug('message:\n%s\n' % (message or ''))

    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get('exact'):
        # --exact requires node/parent metadata recorded in the patch
        if not nodeid or not p1:
            raise error.Abort(_('not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(_("warning: import the patch as a normal revision\n"
                      "(use --exact to import the patch as a merge)\n"))
    else:
        p1, p2 = parents

    n = None
    if update:
        # apply the patch to the working directory
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get('exact') or importbranch:
            repo.dirstate.setbranch(branch or 'default')

        partial = opts.get('partial', False)
        files = set()
        try:
            patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                        files=files, eolmode=None, similarity=sim / 100.0)
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                # with --partial, hunks that failed leave .rej files behind
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get('exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], 'import.normal')
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=editform,
                                         **pycompat.strkwargs(opts))
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[('ui', 'allowemptycommit')] = True
            with repo.ui.configoverride(overrides, 'import'):
                n = repo.commit(message, user,
                                date, match=m,
                                editor=editor, extra=extra)
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: apply the patch in memory against p1, without touching
        # the working directory
        if opts.get('exact') or importbranch:
            branch = branch or 'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                files, eolmode=None)
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform='import.bypass')
            memctx = context.memctx(repo, (p1.node(), p2.node()),
                                    message,
                                    files=files,
                                    filectxfn=store,
                                    user=user,
                                    date=date,
                                    branch=branch,
                                    editor=editor)
            n = memctx.commit()
        finally:
            store.close()
    if opts.get('exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_("warning: can't check exact import with --no-commit\n"))
    elif opts.get('exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_('patch is damaged or loses information'))
    msg = _('applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _('created %s') % short(n)
    return msg, n, rejects
1588 1588
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1596 1596
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Write one "HG changeset patch" (header plus diff) for 'ctx' to 'fm'

    'seqno' is the 1-based position of 'ctx' in the exported series; it is
    only used by the extraexport header hooks.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against the second parent instead of the first
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain('# HG changeset patch\n')
    fm.write('user', '# User %s\n', ctx.user())
    fm.plain('# Date %d %d\n' % ctx.date())
    fm.write('date', '# %s\n', fm.formatdate(ctx.date()))
    # the branch header is omitted for the 'default' branch
    fm.condwrite(branch and branch != 'default',
                 'branch', '# Branch %s\n', branch)
    fm.write('node', '# Node ID %s\n', hex(node))
    fm.plain('# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain('# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name='node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain('# %s\n' % header)

    fm.write('desc', '%s\n', ctx.description().rstrip())
    fm.plain('\n')

    if fm.isplain():
        # plain output: stream labeled diff chunks straight through
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
1639 1639
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    # '<...>' destinations (e.g. '<unnamed>') are placeholders, not real
    # file names, so they are not echoed
    notedest = not dest.startswith('<')
    for seqno, rev in enumerate(revs, 1):
        if notedest:
            repo.ui.note("%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, repo[rev], fm, match, switch_parent, seqno,
                      diffopts)
1648 1648
def _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, diffopts,
                      match):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemap = util.sortdict() # filename: [(seqno, rev), ...]

    # first group the revisions by the file they expand to, keeping order
    for seqno, rev in enumerate(revs, 1):
        dest = makefilename(repo[rev], fntemplate,
                            total=total, seqno=seqno, revwidth=revwidth)
        filemap.setdefault(dest, []).append((seqno, rev))

    # then emit each file's patches in one go
    for dest, entries in filemap.items():
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note("%s\n" % dest)
            for seqno, rev in entries:
                fm.startitem()
                _exportsingle(repo, repo[rev], fm, match, switch_parent,
                              seqno, diffopts)
1670 1670
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch the files touched by 'revs' that are accepted by 'match'."""
    allfiles = set()
    for rev in revs:
        allfiles.update(f for f in repo[rev].files()
                        if not match or match(f))
    scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
1678 1678
def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
          the given template.
        Otherwise: All revs will be written to basefm.
    '''
    # prefetch the changed files for all exported revisions up front
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, opts,
                          match)
    else:
        _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts,
                    match)
1710 1710
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    # prefetch the changed files for all exported revisions up front
    _prefetchchangedfiles(repo, revs, match)

    with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
        _exportfile(repo, revs, fm, getattr(fp, 'name', '<unnamed>'),
                    switch_parent, opts, match)
1718 1718
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function.

    'fm' is a formatter, 'marker' an obsolescence marker object; 'index',
    when given, is written first.
    """
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the 'date' metadata entry is already shown above, so drop it here
    meta = marker.metadata().copy()
    meta.pop('date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1740 1740
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec.

    Returns the matching revision number as a string; aborts when no
    revision matches."""
    datematcher = dateutil.matchdate(date)
    allmatcher = scmutil.matchall(repo)
    found = {}

    def prep(ctx, fns):
        when = ctx.date()
        if datematcher(when[0]):
            found[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, allmatcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in found:
            continue
        ui.status(_("found revision %s from %s\n")
                  % (rev, dateutil.datestr(found[rev])))
        return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
1761 1761
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double from ``windowsize`` until they
    reach or exceed ``sizelimit``, then repeat the capped size forever."""
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1767 1767
def _walkrevs(repo, opts):
    """Resolve the revisions a log-like command should walk.

    The default --rev value depends on --follow, but --follow behavior
    depends on the revisions resolved from --rev, hence this helper.
    Returns a smartset in visit order (newest first unless --rev says
    otherwise)."""
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if following and repo.dirstate.p1() == nullid:
        # following from the null revision: nothing to walk
        return smartset.baseset()
    if following:
        return repo.revs('reverse(:.)')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
1782 1782
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone
    (callers fall back to the slow changelog-scanning path)."""
    pass
1785 1785
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    As a side effect, fills ``fncache`` with {rev: [filenames]} for the
    revisions found, and follows copies/renames when ``follow`` is set.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []  # (filename, filenode) pairs discovered via renames
    minrev, maxrev = min(revs), max(revs)
    def filerevs(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yields (filename, filenode-or-None); node is only known when
        # following from the working copy's parent
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1882 1882
class _followfilter(object):
    """Incrementally decide whether revisions are related to a start rev.

    The first revision passed to match() becomes the start point; later
    revisions match when they are descendants (forward walk) or ancestors
    (backward walk) of revisions accepted so far.  With ``onlyfirst``,
    only first parents are followed (--follow-first semantics).
    Revisions must be fed in monotonically in one direction."""
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev  # nullrev until the first match() call
        self.roots = set()       # frontier of known-related revisions
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parents to follow: first parent only, or all non-null parents
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1920 1920
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    allfiles = opts.get('all_files')
    follow = opts.get('follow') or opts.get('follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    fncache = {}
    change = repo.__getitem__

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always() or allfiles:
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    if allfiles:
                        matches = list(ctx.manifest().walk(match))
                    else:
                        matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in pycompat.xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE: relies on smartset subtraction when wanted is a
                # smartset; lazywantedset never reaches here (follow is off)
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # first pass: call prepare() on the window in forward order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        if allfiles:
                            fiter = iter(ctx)
                        else:
                            fiter = ctx.files()
                        for f in fiter:
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # second pass: yield contexts in the caller's desired order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2065 2065
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule matched unknown files for addition to the dirstate.

    Recurses into subrepositories (adding only explicitly matched files
    unless --subrepos is set).  Returns the list of files that could not
    be added."""
    bad = []

    # record names the matcher rejects while still delegating to its callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for filename case collisions (e.g. on Windows)
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % uipathfn(f),
                          label='ui.addremove.added')

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, False,
                                   **opts))
            else:
                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, True,
                                   **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % uipathfn(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2113 2113
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` under ``serverpath`` in the hgweb config dict
    ``webconf``, recursing into every subrepo ever recorded in .hgsub."""
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2122 2122
def forget(ui, repo, match, prefix, uipathfn, explicitonly, dryrun,
           interactive):
    """Stop tracking the matched files without deleting them.

    Returns a pair (bad, forgot): files that could not be forgotten and
    files that actually were.  Recurses into subrepositories."""
    if dryrun and interactive:
        raise error.Abort(_("cannot specify both --dry-run and --interactive"))
    bad = []
    # record names the matcher rejects while still delegating to its callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(submatch, subprefix, subuipathfn,
                                           dryrun=dryrun,
                                           interactive=interactive)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % uipathfn(subpath))

    if not explicitonly:
        # warn about explicitly-named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % uipathfn(f))
                    bad.append(f)

    if interactive:
        # prompt per file; iterate a copy since 'forget' may be mutated
        responses = _('[Ynsa?]'
                      '$$ &Yes, forget this file'
                      '$$ &No, skip this file'
                      '$$ &Skip remaining files'
                      '$$ Include &all remaining files'
                      '$$ &? (display help)')
        for filename in forget[:]:
            r = ui.promptchoice(_('forget %s %s') %
                                (uipathfn(filename), responses))
            if r == 4: # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write('%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(_('forget %s %s') %
                                        (uipathfn(filename), responses))
            if r == 0: # yes
                continue
            elif r == 1: # no
                forget.remove(filename)
            elif r == 2: # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3: # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(_('removing %s\n') % uipathfn(f),
                      label='ui.addremove.removed')

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2205 2205
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """Emit the files of ``ctx`` matched by ``m`` through formatter ``fm``.

    Returns 0 if anything was listed (here or in a subrepo), 1 otherwise."""
    exitcode = 1

    # only pay for a filectx when size/flags are actually rendered
    wantfctx = ui.verbose or {'size', 'flags'} & fm.datahint()
    for path in ctx.matches(m):
        fm.startitem()
        fm.context(ctx=ctx)
        if wantfctx:
            fctx = ctx[path]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(path=path)
        fm.plain(fmt % uipathfn(path))
        exitcode = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if not (subrepos or m.exact(subpath) or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        try:
            recurse = m.exact(subpath) or subrepos
            if sub.printfiles(ui, submatch, subuipathfn, fm, fmt,
                              recurse) == 0:
                exitcode = 0
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % uipathfn(subpath))

    return exitcode
2235 2235
def remove(ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun,
           warnings=None):
    """Remove the matched files from the working directory and dirstate.

    ``after`` records files already deleted from disk; ``force`` removes
    even modified/added files.  Returns 0 on success, 1 if any file was
    skipped or warned about."""
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # accumulate warnings; emit them at the end only at the top-level call
    # (recursive subrepo calls pass the caller's list in)
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(_('searching'), total=len(subs),
                               unit=_('subrepos'))
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, subprefix, subuipathfn, after,
                                   force, subrepos, dryrun, warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % uipathfn(subpath))
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(_('deleting'), total=len(files),
                               unit=_('files'))
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                        % uipathfn(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                        % uipathfn(f))
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # decide which status classes are eligible for removal
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(_('skipping'), total=len(remaining),
                                   unit=_('files'))
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % uipathfn(f))
            ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(_('skipping'),
                                   total=(len(modified) + len(added)),
                                   unit=_('files'))
        for f in modified:
            progress.increment()
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % uipathfn(f))
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % uipathfn(f))
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(_('deleting'), total=len(list),
                               unit=_('files'))
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(_('removing %s\n') % uipathfn(f),
                      label='ui.addremove.removed')
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue # we never unlink added files on remove
                    rmdir = repo.ui.configbool('experimental',
                                               'removeemptydirs')
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2355 2355
2356 def _catfmtneedsdata(fm):
2357 return not fm.datahint() or 'data' in fm.datahint()
2358
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first.

    Emits one item with the file's path and (when the formatter's data
    hint requires it) its contents, decoded through filters if ``decode``
    is set."""
    # The diff residue here had left both the old unconditional fetch and
    # the new guarded one; only the guarded version belongs.
    # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
    # wasn't requested.
    data = b''
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            data = ctx.repo().wwritedata(path, data)
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write('data', '%s', data)
    fm.data(path=path)
2368 2376
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Output matched files from ``ctx``, optionally to per-file
    destinations named by ``fntemplate``.

    File contents are prefetched only when the formatter actually needs
    them (see _catfmtneedsdata).  Returns 0 if anything was written,
    1 otherwise.

    The diff residue in this span had left both the old unconditional
    prefetch calls and the new guarded ones; only the guarded versions
    belong."""
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(ctx, fntemplate,
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
                write(file)
                return 0
        except KeyError:
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [ctx.rev()], matcher)

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            if not sub.cat(submatch, basefm, fntemplate, subprefix,
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n") %
                      uipathfn(subpath))

    return err
2420 2430
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Runs optional --addremove handling inside a dirstate guard, then
    delegates the actual commit to ``commitfunc``.  Returns whatever
    ``commitfunc`` returns (typically the new node).'''
    date = opts.get('date')
    if date:
        opts['date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, "", uipathfn, opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)
2443 2453
def samefile(f, ctx1, ctx2):
    """Return True if file ``f`` is identical (content and flags) in
    ``ctx1`` and ``ctx2``, or absent from both."""
    present1 = f in ctx1.manifest()
    present2 = f in ctx2.manifest()
    if not present1:
        # equal only when the file is missing on both sides
        return not present2
    if not present2:
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2455 2465
2456 2466 def amend(ui, repo, old, extra, pats, opts):
2457 2467 # avoid cycle context -> subrepo -> cmdutil
2458 2468 from . import context
2459 2469
2460 2470 # amend will reuse the existing user if not specified, but the obsolete
2461 2471 # marker creation requires that the current user's name is specified.
2462 2472 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2463 2473 ui.username() # raise exception if username not set
2464 2474
2465 2475 ui.note(_('amending changeset %s\n') % old)
2466 2476 base = old.p1()
2467 2477
2468 2478 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2469 2479 # Participating changesets:
2470 2480 #
2471 2481 # wctx o - workingctx that contains changes from working copy
2472 2482 # | to go into amending commit
2473 2483 # |
2474 2484 # old o - changeset to amend
2475 2485 # |
2476 2486 # base o - first parent of the changeset to amend
2477 2487 wctx = repo[None]
2478 2488
2479 2489 # Copy to avoid mutating input
2480 2490 extra = extra.copy()
2481 2491 # Update extra dict from amended commit (e.g. to preserve graft
2482 2492 # source)
2483 2493 extra.update(old.extra())
2484 2494
2485 2495 # Also update it from the from the wctx
2486 2496 extra.update(wctx.extra())
2487 2497
2488 2498 user = opts.get('user') or old.user()
2489 2499
2490 2500 datemaydiffer = False # date-only change should be ignored?
2491 2501 if opts.get('date') and opts.get('currentdate'):
2492 2502 raise error.Abort(_('--date and --currentdate are mutually '
2493 2503 'exclusive'))
2494 2504 if opts.get('date'):
2495 2505 date = dateutil.parsedate(opts.get('date'))
2496 2506 elif opts.get('currentdate'):
2497 2507 date = dateutil.makedate()
2498 2508 elif (ui.configbool('rewrite', 'update-timestamp')
2499 2509 and opts.get('currentdate') is None):
2500 2510 date = dateutil.makedate()
2501 2511 datemaydiffer = True
2502 2512 else:
2503 2513 date = old.date()
2504 2514
2505 2515 if len(old.parents()) > 1:
2506 2516 # ctx.files() isn't reliable for merges, so fall back to the
2507 2517 # slower repo.status() method
2508 2518 files = {fn for st in base.status(old)[:3] for fn in st}
2509 2519 else:
2510 2520 files = set(old.files())
2511 2521
2512 2522 # add/remove the files to the working copy if the "addremove" option
2513 2523 # was specified.
2514 2524 matcher = scmutil.match(wctx, pats, opts)
2515 2525 relative = scmutil.anypats(pats, opts)
2516 2526 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2517 2527 if (opts.get('addremove')
2518 2528 and scmutil.addremove(repo, matcher, "", uipathfn, opts)):
2519 2529 raise error.Abort(
2520 2530 _("failed to mark all new/missing files as added/removed"))
2521 2531
2522 2532 # Check subrepos. This depends on in-place wctx._status update in
2523 2533 # subrepo.precommit(). To minimize the risk of this hack, we do
2524 2534 # nothing if .hgsub does not exist.
2525 2535 if '.hgsub' in wctx or '.hgsub' in old:
2526 2536 subs, commitsubs, newsubstate = subrepoutil.precommit(
2527 2537 ui, wctx, wctx._status, matcher)
2528 2538 # amend should abort if commitsubrepos is enabled
2529 2539 assert not commitsubs
2530 2540 if subs:
2531 2541 subrepoutil.writestate(repo, newsubstate)
2532 2542
2533 2543 ms = mergemod.mergestate.read(repo)
2534 2544 mergeutil.checkunresolved(ms)
2535 2545
2536 2546 filestoamend = set(f for f in wctx.files() if matcher(f))
2537 2547
2538 2548 changes = (len(filestoamend) > 0)
2539 2549 if changes:
2540 2550 # Recompute copies (avoid recording a -> b -> a)
2541 2551 copied = copies.pathcopies(base, wctx, matcher)
2542 2552 if old.p2:
2543 2553 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2544 2554
2545 2555 # Prune files which were reverted by the updates: if old
2546 2556 # introduced file X and the file was renamed in the working
2547 2557 # copy, then those two files are the same and
2548 2558 # we can discard X from our list of files. Likewise if X
2549 2559 # was removed, it's no longer relevant. If X is missing (aka
2550 2560 # deleted), old X must be preserved.
2551 2561 files.update(filestoamend)
2552 2562 files = [f for f in files if (not samefile(f, wctx, base)
2553 2563 or f in wctx.deleted())]
2554 2564
2555 2565 def filectxfn(repo, ctx_, path):
2556 2566 try:
2557 2567 # If the file being considered is not amongst the files
2558 2568 # to be amended, we should return the file context from the
2559 2569 # old changeset. This avoids issues when only some files in
2560 2570 # the working copy are being amended but there are also
2561 2571 # changes to other files from the old changeset.
2562 2572 if path not in filestoamend:
2563 2573 return old.filectx(path)
2564 2574
2565 2575 # Return None for removed files.
2566 2576 if path in wctx.removed():
2567 2577 return None
2568 2578
2569 2579 fctx = wctx[path]
2570 2580 flags = fctx.flags()
2571 2581 mctx = context.memfilectx(repo, ctx_,
2572 2582 fctx.path(), fctx.data(),
2573 2583 islink='l' in flags,
2574 2584 isexec='x' in flags,
2575 2585 copysource=copied.get(path))
2576 2586 return mctx
2577 2587 except KeyError:
2578 2588 return None
2579 2589 else:
2580 2590 ui.note(_('copying changeset %s to %s\n') % (old, base))
2581 2591
2582 2592 # Use version of files as in the old cset
2583 2593 def filectxfn(repo, ctx_, path):
2584 2594 try:
2585 2595 return old.filectx(path)
2586 2596 except KeyError:
2587 2597 return None
2588 2598
2589 2599 # See if we got a message from -m or -l, if not, open the editor with
2590 2600 # the message of the changeset to amend.
2591 2601 message = logmessage(ui, opts)
2592 2602
2593 2603 editform = mergeeditform(old, 'commit.amend')
2594 2604
2595 2605 if not message:
2596 2606 message = old.description()
2597 2607 # Default if message isn't provided and --edit is not passed is to
2598 2608 # invoke editor, but allow --no-edit. If somehow we don't have any
2599 2609 # description, let's always start the editor.
2600 2610 doedit = not message or opts.get('edit') in [True, None]
2601 2611 else:
2602 2612 # Default if message is provided is to not invoke editor, but allow
2603 2613 # --edit.
2604 2614 doedit = opts.get('edit') is True
2605 2615 editor = getcommiteditor(edit=doedit, editform=editform)
2606 2616
2607 2617 pureextra = extra.copy()
2608 2618 extra['amend_source'] = old.hex()
2609 2619
2610 2620 new = context.memctx(repo,
2611 2621 parents=[base.node(), old.p2().node()],
2612 2622 text=message,
2613 2623 files=files,
2614 2624 filectxfn=filectxfn,
2615 2625 user=user,
2616 2626 date=date,
2617 2627 extra=extra,
2618 2628 editor=editor)
2619 2629
2620 2630 newdesc = changelog.stripdesc(new.description())
2621 2631 if ((not changes)
2622 2632 and newdesc == old.description()
2623 2633 and user == old.user()
2624 2634 and (date == old.date() or datemaydiffer)
2625 2635 and pureextra == old.extra()):
2626 2636 # nothing changed. continuing here would create a new node
2627 2637 # anyway because of the amend_source noise.
2628 2638 #
2629 2639 # This not what we expect from amend.
2630 2640 return old.node()
2631 2641
2632 2642 commitphase = None
2633 2643 if opts.get('secret'):
2634 2644 commitphase = phases.secret
2635 2645 newid = repo.commitctx(new)
2636 2646
2637 2647 # Reroute the working copy parent to the new changeset
2638 2648 repo.setparents(newid, nullid)
2639 2649 mapping = {old.node(): (newid,)}
2640 2650 obsmetadata = None
2641 2651 if opts.get('note'):
2642 2652 obsmetadata = {'note': encoding.fromlocal(opts['note'])}
2643 2653 backup = ui.configbool('rewrite', 'backup-bundle')
2644 2654 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
2645 2655 fixphase=True, targetphase=commitphase,
2646 2656 backup=backup)
2647 2657
2648 2658 # Fixing the dirstate because localrepo.commitctx does not update
2649 2659 # it. This is rather convenient because we did not need to update
2650 2660 # the dirstate for all the files in the new commit which commitctx
2651 2661 # could have done if it updated the dirstate. Now, we can
2652 2662 # selectively update the dirstate only for the amended files.
2653 2663 dirstate = repo.dirstate
2654 2664
2655 2665 # Update the state of the files which were added and
2656 2666 # and modified in the amend to "normal" in the dirstate.
2657 2667 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
2658 2668 for f in normalfiles:
2659 2669 dirstate.normal(f)
2660 2670
2661 2671 # Update the state of files which were removed in the amend
2662 2672 # to "removed" in the dirstate.
2663 2673 removedfiles = set(wctx.removed()) & filestoamend
2664 2674 for f in removedfiles:
2665 2675 dirstate.drop(f)
2666 2676
2667 2677 return newid
2668 2678
2669 2679 def commiteditor(repo, ctx, subs, editform=''):
2670 2680 if ctx.description():
2671 2681 return ctx.description()
2672 2682 return commitforceeditor(repo, ctx, subs, editform=editform,
2673 2683 unchangedmessagedetection=True)
2674 2684
2675 2685 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2676 2686 editform='', unchangedmessagedetection=False):
2677 2687 if not extramsg:
2678 2688 extramsg = _("Leave message empty to abort commit.")
2679 2689
2680 2690 forms = [e for e in editform.split('.') if e]
2681 2691 forms.insert(0, 'changeset')
2682 2692 templatetext = None
2683 2693 while forms:
2684 2694 ref = '.'.join(forms)
2685 2695 if repo.ui.config('committemplate', ref):
2686 2696 templatetext = committext = buildcommittemplate(
2687 2697 repo, ctx, subs, extramsg, ref)
2688 2698 break
2689 2699 forms.pop()
2690 2700 else:
2691 2701 committext = buildcommittext(repo, ctx, subs, extramsg)
2692 2702
2693 2703 # run editor in the repository root
2694 2704 olddir = encoding.getcwd()
2695 2705 os.chdir(repo.root)
2696 2706
2697 2707 # make in-memory changes visible to external process
2698 2708 tr = repo.currenttransaction()
2699 2709 repo.dirstate.write(tr)
2700 2710 pending = tr and tr.writepending() and repo.root
2701 2711
2702 2712 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2703 2713 editform=editform, pending=pending,
2704 2714 repopath=repo.path, action='commit')
2705 2715 text = editortext
2706 2716
2707 2717 # strip away anything below this special string (used for editors that want
2708 2718 # to display the diff)
2709 2719 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
2710 2720 if stripbelow:
2711 2721 text = text[:stripbelow.start()]
2712 2722
2713 2723 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2714 2724 os.chdir(olddir)
2715 2725
2716 2726 if finishdesc:
2717 2727 text = finishdesc(text)
2718 2728 if not text.strip():
2719 2729 raise error.Abort(_("empty commit message"))
2720 2730 if unchangedmessagedetection and editortext == templatetext:
2721 2731 raise error.Abort(_("commit message unchanged"))
2722 2732
2723 2733 return text
2724 2734
2725 2735 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
2726 2736 ui = repo.ui
2727 2737 spec = formatter.templatespec(ref, None, None)
2728 2738 t = logcmdutil.changesettemplater(ui, repo, spec)
2729 2739 t.t.cache.update((k, templater.unquotestring(v))
2730 2740 for k, v in repo.ui.configitems('committemplate'))
2731 2741
2732 2742 if not extramsg:
2733 2743 extramsg = '' # ensure that extramsg is string
2734 2744
2735 2745 ui.pushbuffer()
2736 2746 t.show(ctx, extramsg=extramsg)
2737 2747 return ui.popbuffer()
2738 2748
2739 2749 def hgprefix(msg):
2740 2750 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
2741 2751
2742 2752 def buildcommittext(repo, ctx, subs, extramsg):
2743 2753 edittext = []
2744 2754 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2745 2755 if ctx.description():
2746 2756 edittext.append(ctx.description())
2747 2757 edittext.append("")
2748 2758 edittext.append("") # Empty line between message and comments.
2749 2759 edittext.append(hgprefix(_("Enter commit message."
2750 2760 " Lines beginning with 'HG:' are removed.")))
2751 2761 edittext.append(hgprefix(extramsg))
2752 2762 edittext.append("HG: --")
2753 2763 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2754 2764 if ctx.p2():
2755 2765 edittext.append(hgprefix(_("branch merge")))
2756 2766 if ctx.branch():
2757 2767 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2758 2768 if bookmarks.isactivewdirparent(repo):
2759 2769 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2760 2770 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2761 2771 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2762 2772 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2763 2773 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2764 2774 if not added and not modified and not removed:
2765 2775 edittext.append(hgprefix(_("no files changed")))
2766 2776 edittext.append("")
2767 2777
2768 2778 return "\n".join(edittext)
2769 2779
2770 2780 def commitstatus(repo, node, branch, bheads=None, opts=None):
2771 2781 if opts is None:
2772 2782 opts = {}
2773 2783 ctx = repo[node]
2774 2784 parents = ctx.parents()
2775 2785
2776 2786 if (not opts.get('amend') and bheads and node not in bheads and not
2777 2787 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2778 2788 repo.ui.status(_('created new head\n'))
2779 2789 # The message is not printed for initial roots. For the other
2780 2790 # changesets, it is printed in the following situations:
2781 2791 #
2782 2792 # Par column: for the 2 parents with ...
2783 2793 # N: null or no parent
2784 2794 # B: parent is on another named branch
2785 2795 # C: parent is a regular non head changeset
2786 2796 # H: parent was a branch head of the current branch
2787 2797 # Msg column: whether we print "created new head" message
2788 2798 # In the following, it is assumed that there already exists some
2789 2799 # initial branch heads of the current branch, otherwise nothing is
2790 2800 # printed anyway.
2791 2801 #
2792 2802 # Par Msg Comment
2793 2803 # N N y additional topo root
2794 2804 #
2795 2805 # B N y additional branch root
2796 2806 # C N y additional topo head
2797 2807 # H N n usual case
2798 2808 #
2799 2809 # B B y weird additional branch root
2800 2810 # C B y branch merge
2801 2811 # H B n merge with named branch
2802 2812 #
2803 2813 # C C y additional head from merge
2804 2814 # C H n merge with a head
2805 2815 #
2806 2816 # H H n head merge: head count decreases
2807 2817
2808 2818 if not opts.get('close_branch'):
2809 2819 for r in parents:
2810 2820 if r.closesbranch() and r.branch() == branch:
2811 2821 repo.ui.status(_('reopening closed branch head %d\n') % r.rev())
2812 2822
2813 2823 if repo.ui.debugflag:
2814 2824 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
2815 2825 elif repo.ui.verbose:
2816 2826 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
2817 2827
2818 2828 def postcommitstatus(repo, pats, opts):
2819 2829 return repo.status(match=scmutil.match(repo[None], pats, opts))
2820 2830
2821 2831 def revert(ui, repo, ctx, parents, *pats, **opts):
2822 2832 opts = pycompat.byteskwargs(opts)
2823 2833 parent, p2 = parents
2824 2834 node = ctx.node()
2825 2835
2826 2836 mf = ctx.manifest()
2827 2837 if node == p2:
2828 2838 parent = p2
2829 2839
2830 2840 # need all matching names in dirstate and manifest of target rev,
2831 2841 # so have to walk both. do not print errors if files exist in one
2832 2842 # but not other. in both cases, filesets should be evaluated against
2833 2843 # workingctx to get consistent result (issue4497). this means 'set:**'
2834 2844 # cannot be used to select missing files from target rev.
2835 2845
2836 2846 # `names` is a mapping for all elements in working copy and target revision
2837 2847 # The mapping is in the form:
2838 2848 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2839 2849 names = {}
2840 2850 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2841 2851
2842 2852 with repo.wlock():
2843 2853 ## filling of the `names` mapping
2844 2854 # walk dirstate to fill `names`
2845 2855
2846 2856 interactive = opts.get('interactive', False)
2847 2857 wctx = repo[None]
2848 2858 m = scmutil.match(wctx, pats, opts)
2849 2859
2850 2860 # we'll need this later
2851 2861 targetsubs = sorted(s for s in wctx.substate if m(s))
2852 2862
2853 2863 if not m.always():
2854 2864 matcher = matchmod.badmatch(m, lambda x, y: False)
2855 2865 for abs in wctx.walk(matcher):
2856 2866 names[abs] = m.exact(abs)
2857 2867
2858 2868 # walk target manifest to fill `names`
2859 2869
2860 2870 def badfn(path, msg):
2861 2871 if path in names:
2862 2872 return
2863 2873 if path in ctx.substate:
2864 2874 return
2865 2875 path_ = path + '/'
2866 2876 for f in names:
2867 2877 if f.startswith(path_):
2868 2878 return
2869 2879 ui.warn("%s: %s\n" % (uipathfn(path), msg))
2870 2880
2871 2881 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2872 2882 if abs not in names:
2873 2883 names[abs] = m.exact(abs)
2874 2884
2875 2885 # Find status of all file in `names`.
2876 2886 m = scmutil.matchfiles(repo, names)
2877 2887
2878 2888 changes = repo.status(node1=node, match=m,
2879 2889 unknown=True, ignored=True, clean=True)
2880 2890 else:
2881 2891 changes = repo.status(node1=node, match=m)
2882 2892 for kind in changes:
2883 2893 for abs in kind:
2884 2894 names[abs] = m.exact(abs)
2885 2895
2886 2896 m = scmutil.matchfiles(repo, names)
2887 2897
2888 2898 modified = set(changes.modified)
2889 2899 added = set(changes.added)
2890 2900 removed = set(changes.removed)
2891 2901 _deleted = set(changes.deleted)
2892 2902 unknown = set(changes.unknown)
2893 2903 unknown.update(changes.ignored)
2894 2904 clean = set(changes.clean)
2895 2905 modadded = set()
2896 2906
2897 2907 # We need to account for the state of the file in the dirstate,
2898 2908 # even when we revert against something else than parent. This will
2899 2909 # slightly alter the behavior of revert (doing back up or not, delete
2900 2910 # or just forget etc).
2901 2911 if parent == node:
2902 2912 dsmodified = modified
2903 2913 dsadded = added
2904 2914 dsremoved = removed
2905 2915 # store all local modifications, useful later for rename detection
2906 2916 localchanges = dsmodified | dsadded
2907 2917 modified, added, removed = set(), set(), set()
2908 2918 else:
2909 2919 changes = repo.status(node1=parent, match=m)
2910 2920 dsmodified = set(changes.modified)
2911 2921 dsadded = set(changes.added)
2912 2922 dsremoved = set(changes.removed)
2913 2923 # store all local modifications, useful later for rename detection
2914 2924 localchanges = dsmodified | dsadded
2915 2925
2916 2926 # only take into account for removes between wc and target
2917 2927 clean |= dsremoved - removed
2918 2928 dsremoved &= removed
2919 2929 # distinct between dirstate remove and other
2920 2930 removed -= dsremoved
2921 2931
2922 2932 modadded = added & dsmodified
2923 2933 added -= modadded
2924 2934
2925 2935 # tell newly modified apart.
2926 2936 dsmodified &= modified
2927 2937 dsmodified |= modified & dsadded # dirstate added may need backup
2928 2938 modified -= dsmodified
2929 2939
2930 2940 # We need to wait for some post-processing to update this set
2931 2941 # before making the distinction. The dirstate will be used for
2932 2942 # that purpose.
2933 2943 dsadded = added
2934 2944
2935 2945 # in case of merge, files that are actually added can be reported as
2936 2946 # modified, we need to post process the result
2937 2947 if p2 != nullid:
2938 2948 mergeadd = set(dsmodified)
2939 2949 for path in dsmodified:
2940 2950 if path in mf:
2941 2951 mergeadd.remove(path)
2942 2952 dsadded |= mergeadd
2943 2953 dsmodified -= mergeadd
2944 2954
2945 2955 # if f is a rename, update `names` to also revert the source
2946 2956 for f in localchanges:
2947 2957 src = repo.dirstate.copied(f)
2948 2958 # XXX should we check for rename down to target node?
2949 2959 if src and src not in names and repo.dirstate[src] == 'r':
2950 2960 dsremoved.add(src)
2951 2961 names[src] = True
2952 2962
2953 2963 # determine the exact nature of the deleted changesets
2954 2964 deladded = set(_deleted)
2955 2965 for path in _deleted:
2956 2966 if path in mf:
2957 2967 deladded.remove(path)
2958 2968 deleted = _deleted - deladded
2959 2969
2960 2970 # distinguish between file to forget and the other
2961 2971 added = set()
2962 2972 for abs in dsadded:
2963 2973 if repo.dirstate[abs] != 'a':
2964 2974 added.add(abs)
2965 2975 dsadded -= added
2966 2976
2967 2977 for abs in deladded:
2968 2978 if repo.dirstate[abs] == 'a':
2969 2979 dsadded.add(abs)
2970 2980 deladded -= dsadded
2971 2981
2972 2982 # For files marked as removed, we check if an unknown file is present at
2973 2983 # the same path. If a such file exists it may need to be backed up.
2974 2984 # Making the distinction at this stage helps have simpler backup
2975 2985 # logic.
2976 2986 removunk = set()
2977 2987 for abs in removed:
2978 2988 target = repo.wjoin(abs)
2979 2989 if os.path.lexists(target):
2980 2990 removunk.add(abs)
2981 2991 removed -= removunk
2982 2992
2983 2993 dsremovunk = set()
2984 2994 for abs in dsremoved:
2985 2995 target = repo.wjoin(abs)
2986 2996 if os.path.lexists(target):
2987 2997 dsremovunk.add(abs)
2988 2998 dsremoved -= dsremovunk
2989 2999
2990 3000 # action to be actually performed by revert
2991 3001 # (<list of file>, message>) tuple
2992 3002 actions = {'revert': ([], _('reverting %s\n')),
2993 3003 'add': ([], _('adding %s\n')),
2994 3004 'remove': ([], _('removing %s\n')),
2995 3005 'drop': ([], _('removing %s\n')),
2996 3006 'forget': ([], _('forgetting %s\n')),
2997 3007 'undelete': ([], _('undeleting %s\n')),
2998 3008 'noop': (None, _('no changes needed to %s\n')),
2999 3009 'unknown': (None, _('file not managed: %s\n')),
3000 3010 }
3001 3011
3002 3012 # "constant" that convey the backup strategy.
3003 3013 # All set to `discard` if `no-backup` is set do avoid checking
3004 3014 # no_backup lower in the code.
3005 3015 # These values are ordered for comparison purposes
3006 3016 backupinteractive = 3 # do backup if interactively modified
3007 3017 backup = 2 # unconditionally do backup
3008 3018 check = 1 # check if the existing file differs from target
3009 3019 discard = 0 # never do backup
3010 3020 if opts.get('no_backup'):
3011 3021 backupinteractive = backup = check = discard
3012 3022 if interactive:
3013 3023 dsmodifiedbackup = backupinteractive
3014 3024 else:
3015 3025 dsmodifiedbackup = backup
3016 3026 tobackup = set()
3017 3027
3018 3028 backupanddel = actions['remove']
3019 3029 if not opts.get('no_backup'):
3020 3030 backupanddel = actions['drop']
3021 3031
3022 3032 disptable = (
3023 3033 # dispatch table:
3024 3034 # file state
3025 3035 # action
3026 3036 # make backup
3027 3037
3028 3038 ## Sets that results that will change file on disk
3029 3039 # Modified compared to target, no local change
3030 3040 (modified, actions['revert'], discard),
3031 3041 # Modified compared to target, but local file is deleted
3032 3042 (deleted, actions['revert'], discard),
3033 3043 # Modified compared to target, local change
3034 3044 (dsmodified, actions['revert'], dsmodifiedbackup),
3035 3045 # Added since target
3036 3046 (added, actions['remove'], discard),
3037 3047 # Added in working directory
3038 3048 (dsadded, actions['forget'], discard),
3039 3049 # Added since target, have local modification
3040 3050 (modadded, backupanddel, backup),
3041 3051 # Added since target but file is missing in working directory
3042 3052 (deladded, actions['drop'], discard),
3043 3053 # Removed since target, before working copy parent
3044 3054 (removed, actions['add'], discard),
3045 3055 # Same as `removed` but an unknown file exists at the same path
3046 3056 (removunk, actions['add'], check),
3047 3057 # Removed since targe, marked as such in working copy parent
3048 3058 (dsremoved, actions['undelete'], discard),
3049 3059 # Same as `dsremoved` but an unknown file exists at the same path
3050 3060 (dsremovunk, actions['undelete'], check),
3051 3061 ## the following sets does not result in any file changes
3052 3062 # File with no modification
3053 3063 (clean, actions['noop'], discard),
3054 3064 # Existing file, not tracked anywhere
3055 3065 (unknown, actions['unknown'], discard),
3056 3066 )
3057 3067
3058 3068 for abs, exact in sorted(names.items()):
3059 3069 # target file to be touch on disk (relative to cwd)
3060 3070 target = repo.wjoin(abs)
3061 3071 # search the entry in the dispatch table.
3062 3072 # if the file is in any of these sets, it was touched in the working
3063 3073 # directory parent and we are sure it needs to be reverted.
3064 3074 for table, (xlist, msg), dobackup in disptable:
3065 3075 if abs not in table:
3066 3076 continue
3067 3077 if xlist is not None:
3068 3078 xlist.append(abs)
3069 3079 if dobackup:
3070 3080 # If in interactive mode, don't automatically create
3071 3081 # .orig files (issue4793)
3072 3082 if dobackup == backupinteractive:
3073 3083 tobackup.add(abs)
3074 3084 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3075 3085 absbakname = scmutil.backuppath(ui, repo, abs)
3076 3086 bakname = os.path.relpath(absbakname,
3077 3087 start=repo.root)
3078 3088 ui.note(_('saving current version of %s as %s\n') %
3079 3089 (uipathfn(abs), uipathfn(bakname)))
3080 3090 if not opts.get('dry_run'):
3081 3091 if interactive:
3082 3092 util.copyfile(target, absbakname)
3083 3093 else:
3084 3094 util.rename(target, absbakname)
3085 3095 if opts.get('dry_run'):
3086 3096 if ui.verbose or not exact:
3087 3097 ui.status(msg % uipathfn(abs))
3088 3098 elif exact:
3089 3099 ui.warn(msg % uipathfn(abs))
3090 3100 break
3091 3101
3092 3102 if not opts.get('dry_run'):
3093 3103 needdata = ('revert', 'add', 'undelete')
3094 3104 oplist = [actions[name][0] for name in needdata]
3095 3105 prefetch = scmutil.prefetchfiles
3096 3106 matchfiles = scmutil.matchfiles
3097 3107 prefetch(repo, [ctx.rev()],
3098 3108 matchfiles(repo,
3099 3109 [f for sublist in oplist for f in sublist]))
3100 3110 match = scmutil.match(repo[None], pats)
3101 3111 _performrevert(repo, parents, ctx, names, uipathfn, actions,
3102 3112 match, interactive, tobackup)
3103 3113
3104 3114 if targetsubs:
3105 3115 # Revert the subrepos on the revert list
3106 3116 for sub in targetsubs:
3107 3117 try:
3108 3118 wctx.sub(sub).revert(ctx.substate[sub], *pats,
3109 3119 **pycompat.strkwargs(opts))
3110 3120 except KeyError:
3111 3121 raise error.Abort("subrepository '%s' does not exist in %s!"
3112 3122 % (sub, short(ctx.node())))
3113 3123
3114 3124 def _performrevert(repo, parents, ctx, names, uipathfn, actions,
3115 3125 match, interactive=False, tobackup=None):
3116 3126 """function that actually perform all the actions computed for revert
3117 3127
3118 3128 This is an independent function to let extension to plug in and react to
3119 3129 the imminent revert.
3120 3130
3121 3131 Make sure you have the working directory locked when calling this function.
3122 3132 """
3123 3133 parent, p2 = parents
3124 3134 node = ctx.node()
3125 3135 excluded_files = []
3126 3136
3127 3137 def checkout(f):
3128 3138 fc = ctx[f]
3129 3139 repo.wwrite(f, fc.data(), fc.flags())
3130 3140
3131 3141 def doremove(f):
3132 3142 try:
3133 3143 rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
3134 3144 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3135 3145 except OSError:
3136 3146 pass
3137 3147 repo.dirstate.remove(f)
3138 3148
3139 3149 def prntstatusmsg(action, f):
3140 3150 exact = names[f]
3141 3151 if repo.ui.verbose or not exact:
3142 3152 repo.ui.status(actions[action][1] % uipathfn(f))
3143 3153
3144 3154 audit_path = pathutil.pathauditor(repo.root, cached=True)
3145 3155 for f in actions['forget'][0]:
3146 3156 if interactive:
3147 3157 choice = repo.ui.promptchoice(
3148 3158 _("forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3149 3159 if choice == 0:
3150 3160 prntstatusmsg('forget', f)
3151 3161 repo.dirstate.drop(f)
3152 3162 else:
3153 3163 excluded_files.append(f)
3154 3164 else:
3155 3165 prntstatusmsg('forget', f)
3156 3166 repo.dirstate.drop(f)
3157 3167 for f in actions['remove'][0]:
3158 3168 audit_path(f)
3159 3169 if interactive:
3160 3170 choice = repo.ui.promptchoice(
3161 3171 _("remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3162 3172 if choice == 0:
3163 3173 prntstatusmsg('remove', f)
3164 3174 doremove(f)
3165 3175 else:
3166 3176 excluded_files.append(f)
3167 3177 else:
3168 3178 prntstatusmsg('remove', f)
3169 3179 doremove(f)
3170 3180 for f in actions['drop'][0]:
3171 3181 audit_path(f)
3172 3182 prntstatusmsg('drop', f)
3173 3183 repo.dirstate.remove(f)
3174 3184
3175 3185 normal = None
3176 3186 if node == parent:
3177 3187 # We're reverting to our parent. If possible, we'd like status
3178 3188 # to report the file as clean. We have to use normallookup for
3179 3189 # merges to avoid losing information about merged/dirty files.
3180 3190 if p2 != nullid:
3181 3191 normal = repo.dirstate.normallookup
3182 3192 else:
3183 3193 normal = repo.dirstate.normal
3184 3194
3185 3195 newlyaddedandmodifiedfiles = set()
3186 3196 if interactive:
3187 3197 # Prompt the user for changes to revert
3188 3198 torevert = [f for f in actions['revert'][0] if f not in excluded_files]
3189 3199 m = scmutil.matchfiles(repo, torevert)
3190 3200 diffopts = patch.difffeatureopts(repo.ui, whitespace=True,
3191 3201 section='commands',
3192 3202 configprefix='revert.interactive.')
3193 3203 diffopts.nodates = True
3194 3204 diffopts.git = True
3195 3205 operation = 'apply'
3196 3206 if node == parent:
3197 3207 if repo.ui.configbool('experimental',
3198 3208 'revert.interactive.select-to-keep'):
3199 3209 operation = 'keep'
3200 3210 else:
3201 3211 operation = 'discard'
3202 3212
3203 3213 if operation == 'apply':
3204 3214 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3205 3215 else:
3206 3216 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3207 3217 originalchunks = patch.parsepatch(diff)
3208 3218
3209 3219 try:
3210 3220
3211 3221 chunks, opts = recordfilter(repo.ui, originalchunks, match,
3212 3222 operation=operation)
3213 3223 if operation == 'discard':
3214 3224 chunks = patch.reversehunks(chunks)
3215 3225
3216 3226 except error.PatchError as err:
3217 3227 raise error.Abort(_('error parsing patch: %s') % err)
3218 3228
3219 3229 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3220 3230 if tobackup is None:
3221 3231 tobackup = set()
3222 3232 # Apply changes
3223 3233 fp = stringio()
3224 3234 # chunks are serialized per file, but files aren't sorted
3225 3235 for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
3226 3236 prntstatusmsg('revert', f)
3227 3237 files = set()
3228 3238 for c in chunks:
3229 3239 if ishunk(c):
3230 3240 abs = c.header.filename()
3231 3241 # Create a backup file only if this hunk should be backed up
3232 3242 if c.header.filename() in tobackup:
3233 3243 target = repo.wjoin(abs)
3234 3244 bakname = scmutil.backuppath(repo.ui, repo, abs)
3235 3245 util.copyfile(target, bakname)
3236 3246 tobackup.remove(abs)
3237 3247 if abs not in files:
3238 3248 files.add(abs)
3239 3249 if operation == 'keep':
3240 3250 checkout(abs)
3241 3251 c.write(fp)
3242 3252 dopatch = fp.tell()
3243 3253 fp.seek(0)
3244 3254 if dopatch:
3245 3255 try:
3246 3256 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3247 3257 except error.PatchError as err:
3248 3258 raise error.Abort(pycompat.bytestr(err))
3249 3259 del fp
3250 3260 else:
3251 3261 for f in actions['revert'][0]:
3252 3262 prntstatusmsg('revert', f)
3253 3263 checkout(f)
3254 3264 if normal:
3255 3265 normal(f)
3256 3266
3257 3267 for f in actions['add'][0]:
3258 3268 # Don't checkout modified files, they are already created by the diff
3259 3269 if f not in newlyaddedandmodifiedfiles:
3260 3270 prntstatusmsg('add', f)
3261 3271 checkout(f)
3262 3272 repo.dirstate.add(f)
3263 3273
3264 3274 normal = repo.dirstate.normallookup
3265 3275 if node == parent and p2 == nullid:
3266 3276 normal = repo.dirstate.normal
3267 3277 for f in actions['undelete'][0]:
3268 3278 if interactive:
3269 3279 choice = repo.ui.promptchoice(
3270 3280 _("add back removed file %s (Yn)?$$ &Yes $$ &No") % f)
3271 3281 if choice == 0:
3272 3282 prntstatusmsg('undelete', f)
3273 3283 checkout(f)
3274 3284 normal(f)
3275 3285 else:
3276 3286 excluded_files.append(f)
3277 3287 else:
3278 3288 prntstatusmsg('undelete', f)
3279 3289 checkout(f)
3280 3290 normal(f)
3281 3291
3282 3292 copied = copies.pathcopies(repo[parent], ctx)
3283 3293
3284 3294 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3285 3295 if f in copied:
3286 3296 repo.dirstate.copy(copied[f], f)
3287 3297
3288 3298 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3289 3299 # commands.outgoing. "missing" is "missing" of the result of
3290 3300 # "findcommonoutgoing()"
3291 3301 outgoinghooks = util.hooks()
3292 3302
3293 3303 # a list of (ui, repo) functions called by commands.summary
3294 3304 summaryhooks = util.hooks()
3295 3305
3296 3306 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3297 3307 #
3298 3308 # functions should return tuple of booleans below, if 'changes' is None:
3299 3309 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3300 3310 #
3301 3311 # otherwise, 'changes' is a tuple of tuples below:
3302 3312 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3303 3313 # - (desturl, destbranch, destpeer, outgoing)
3304 3314 summaryremotehooks = util.hooks()
3305 3315
3306 3316 # A list of state files kept by multistep operations like graft.
3307 3317 # Since graft cannot be aborted, it is considered 'clearable' by update.
3308 3318 # note: bisect is intentionally excluded
3309 3319 # (state file, clearable, allowcommit, error, hint)
3310 3320 unfinishedstates = [
3311 3321 ('graftstate', True, False, _('graft in progress'),
3312 3322 _("use 'hg graft --continue' or 'hg graft --stop' to stop")),
3313 3323 ('updatestate', True, False, _('last update was interrupted'),
3314 3324 _("use 'hg update' to get a consistent checkout"))
3315 3325 ]
3316 3326
3317 3327 def checkunfinished(repo, commit=False):
3318 3328 '''Look for an unfinished multistep operation, like graft, and abort
3319 3329 if found. It's probably good to check this right before
3320 3330 bailifchanged().
3321 3331 '''
3322 3332 # Check for non-clearable states first, so things like rebase will take
3323 3333 # precedence over update.
3324 3334 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3325 3335 if clearable or (commit and allowcommit):
3326 3336 continue
3327 3337 if repo.vfs.exists(f):
3328 3338 raise error.Abort(msg, hint=hint)
3329 3339
3330 3340 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3331 3341 if not clearable or (commit and allowcommit):
3332 3342 continue
3333 3343 if repo.vfs.exists(f):
3334 3344 raise error.Abort(msg, hint=hint)
3335 3345
3336 3346 def clearunfinished(repo):
3337 3347 '''Check for unfinished operations (as above), and clear the ones
3338 3348 that are clearable.
3339 3349 '''
3340 3350 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3341 3351 if not clearable and repo.vfs.exists(f):
3342 3352 raise error.Abort(msg, hint=hint)
3343 3353 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3344 3354 if clearable and repo.vfs.exists(f):
3345 3355 util.unlink(repo.vfs.join(f))
3346 3356
3347 3357 afterresolvedstates = [
3348 3358 ('graftstate',
3349 3359 _('hg graft --continue')),
3350 3360 ]
3351 3361
3352 3362 def howtocontinue(repo):
3353 3363 '''Check for an unfinished operation and return the command to finish
3354 3364 it.
3355 3365
3356 3366 afterresolvedstates tuples define a .hg/{file} and the corresponding
3357 3367 command needed to finish it.
3358 3368
3359 3369 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3360 3370 a boolean.
3361 3371 '''
3362 3372 contmsg = _("continue: %s")
3363 3373 for f, msg in afterresolvedstates:
3364 3374 if repo.vfs.exists(f):
3365 3375 return contmsg % msg, True
3366 3376 if repo[None].dirty(missing=True, merge=False, branch=False):
3367 3377 return contmsg % _("hg commit"), False
3368 3378 return None, None
3369 3379
3370 3380 def checkafterresolved(repo):
3371 3381 '''Inform the user about the next action after completing hg resolve
3372 3382
3373 3383 If there's a matching afterresolvedstates, howtocontinue will yield
3374 3384 repo.ui.warn as the reporter.
3375 3385
3376 3386 Otherwise, it will yield repo.ui.note.
3377 3387 '''
3378 3388 msg, warning = howtocontinue(repo)
3379 3389 if msg is not None:
3380 3390 if warning:
3381 3391 repo.ui.warn("%s\n" % msg)
3382 3392 else:
3383 3393 repo.ui.note("%s\n" % msg)
3384 3394
3385 3395 def wrongtooltocontinue(repo, task):
3386 3396 '''Raise an abort suggesting how to properly continue if there is an
3387 3397 active task.
3388 3398
3389 3399 Uses howtocontinue() to find the active task.
3390 3400
3391 3401 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3392 3402 a hint.
3393 3403 '''
3394 3404 after = howtocontinue(repo)
3395 3405 hint = None
3396 3406 if after[1]:
3397 3407 hint = after[0]
3398 3408 raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,667 +1,714
1 1 #testcases lfsremote-on lfsremote-off
2 2 #require serve no-reposimplestore no-chg
3 3
4 4 This test splits `hg serve` with and without using the extension into separate
5 5 tests cases. The tests are broken down as follows, where "LFS"/"No-LFS"
6 6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
7 7 indicates whether or not the extension is loaded. The "X" cases are not tested
8 8 individually, because the lfs requirement causes the process to bail early if
9 9 the extension is disabled.
10 10
11 11 . Server
12 12 .
13 13 . No-LFS LFS
14 14 . +----------------------------+
15 15 . | || D | E | D | E |
16 16 . |---++=======================|
17 17 . C | D || N/A | #1 | X | #4 |
18 18 . l No +---++-----------------------|
19 19 . i LFS | E || #2 | #2 | X | #5 |
20 20 . e +---++-----------------------|
21 21 . n | D || X | X | X | X |
22 22 . t LFS |---++-----------------------|
23 23 . | E || #3 | #3 | X | #6 |
24 24 . |---++-----------------------+
25 25
26 26 make command server magic visible
27 27
28 28 #if windows
29 29 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
30 30 #else
31 31 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
32 32 #endif
33 33 $ export PYTHONPATH
34 34
35 35 $ hg init server
36 36 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
37 37
38 38 $ cat > $TESTTMP/debugprocessors.py <<EOF
39 39 > from mercurial import (
40 40 > cmdutil,
41 41 > commands,
42 42 > pycompat,
43 43 > registrar,
44 44 > )
45 45 > cmdtable = {}
46 46 > command = registrar.command(cmdtable)
47 47 > @command(b'debugprocessors', [], b'FILE')
48 48 > def debugprocessors(ui, repo, file_=None, **opts):
49 49 > opts = pycompat.byteskwargs(opts)
50 50 > opts[b'changelog'] = False
51 51 > opts[b'manifest'] = False
52 52 > opts[b'dir'] = False
53 53 > rl = cmdutil.openrevlog(repo, b'debugprocessors', file_, opts)
54 54 > for flag, proc in rl._flagprocessors.items():
55 55 > ui.status(b"registered processor '%#x'\n" % (flag))
56 56 > EOF
57 57
58 58 Skip the experimental.changegroup3=True config. Failure to agree on this comes
59 59 first, and causes an "abort: no common changegroup version" if the extension is
60 60 only loaded on one side. If that *is* enabled, the subsequent failure is "abort:
61 61 missing processor for flag '0x2000'!" if the extension is only loaded on one side
62 62 (possibly also masked by the Internal Server Error message).
63 63 $ cat >> $HGRCPATH <<EOF
64 64 > [extensions]
65 65 > debugprocessors = $TESTTMP/debugprocessors.py
66 66 > [experimental]
67 67 > lfs.disableusercache = True
68 68 > [lfs]
69 69 > threshold=10
70 70 > [web]
71 71 > allow_push=*
72 72 > push_ssl=False
73 73 > EOF
74 74
75 75 $ cp $HGRCPATH $HGRCPATH.orig
76 76
77 77 #if lfsremote-on
78 78 $ hg --config extensions.lfs= -R server \
79 79 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
80 80 #else
81 81 $ hg --config extensions.lfs=! -R server \
82 82 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
83 83 #endif
84 84
85 85 $ cat hg.pid >> $DAEMON_PIDS
86 86 $ hg clone -q http://localhost:$HGPORT client
87 87 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
88 88 [1]
89 89
90 90 This trivial repo will force commandserver to load the extension, but not call
91 91 reposetup() on another repo actually being operated on. This gives coverage
92 92 that wrapper functions are not assuming reposetup() was called.
93 93
94 94 $ hg init $TESTTMP/cmdservelfs
95 95 $ cat >> $TESTTMP/cmdservelfs/.hg/hgrc << EOF
96 96 > [extensions]
97 97 > lfs =
98 98 > EOF
99 99
100 100 --------------------------------------------------------------------------------
101 101 Case #1: client with non-lfs content and the extension disabled; server with
102 102 non-lfs content, and the extension enabled.
103 103
104 104 $ cd client
105 105 $ echo 'non-lfs' > nonlfs.txt
106 106 >>> from __future__ import absolute_import
107 107 >>> from hgclient import check, readchannel, runcommand
108 108 >>> @check
109 109 ... def diff(server):
110 110 ... readchannel(server)
111 111 ... # run an arbitrary command in the repo with the extension loaded
112 112 ... runcommand(server, [b'id', b'-R', b'../cmdservelfs'])
113 113 ... # now run a command in a repo without the extension to ensure that
114 114 ... # files are added safely..
115 115 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
116 116 ... # .. and that scmutil.prefetchfiles() safely no-ops..
117 117 ... runcommand(server, [b'diff', b'-r', b'.~1'])
118 118 ... # .. and that debugupgraderepo safely no-ops.
119 119 ... runcommand(server, [b'debugupgraderepo', b'-q', b'--run'])
120 120 *** runcommand id -R ../cmdservelfs
121 121 000000000000 tip
122 122 *** runcommand ci -Aqm non-lfs
123 123 *** runcommand diff -r .~1
124 124 diff -r 000000000000 nonlfs.txt
125 125 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
126 126 +++ b/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
127 127 @@ -0,0 +1,1 @@
128 128 +non-lfs
129 129 *** runcommand debugupgraderepo -q --run
130 130 upgrade will perform the following actions:
131 131
132 132 requirements
133 133 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
134 134
135 135 beginning upgrade...
136 136 repository locked and read-only
137 137 creating temporary repository to stage migrated data: * (glob)
138 138 (it is safe to interrupt this process any time before data migration completes)
139 139 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
140 140 migrating 324 bytes in store; 129 bytes tracked data
141 141 migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
142 142 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
143 143 migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
144 144 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
145 145 migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
146 146 finished migrating 1 changelog revisions; change in size: 0 bytes
147 147 finished migrating 3 total revisions; total change in store size: 0 bytes
148 148 copying phaseroots
149 149 data fully migrated to temporary repository
150 150 marking source repository as being upgraded; clients will be unable to read from repository
151 151 starting in-place swap of repository data
152 152 replaced files will be backed up at * (glob)
153 153 replacing store...
154 154 store replacement complete; repository was inconsistent for *s (glob)
155 155 finalizing requirements file and making repository readable again
156 156 removing temporary repository * (glob)
157 157 copy of old repository backed up at * (glob)
158 158 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
159 159
160 160 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
161 161 [1]
162 162
163 163 #if lfsremote-on
164 164
165 165 $ hg push -q
166 166 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
167 167 [1]
168 168
169 169 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
170 170 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
171 171 [1]
172 172
173 173 $ hg init $TESTTMP/client1_pull
174 174 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
175 175 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
176 176 [1]
177 177
178 178 $ hg identify http://localhost:$HGPORT
179 179 d437e1d24fbd
180 180
181 181 #endif
182 182
183 183 --------------------------------------------------------------------------------
184 184 Case #2: client with non-lfs content and the extension enabled; server with
185 185 non-lfs content, and the extension state controlled by #testcases.
186 186
187 187 $ cat >> $HGRCPATH <<EOF
188 188 > [extensions]
189 189 > lfs =
190 190 > EOF
191 191 $ echo 'non-lfs' > nonlfs2.txt
192 192 $ hg ci -Aqm 'non-lfs file with lfs client'
193 193
194 194 Since no lfs content has been added yet, the push is allowed, even when the
195 195 extension is not enabled remotely.
196 196
197 197 $ hg push -q
198 198 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
199 199 [1]
200 200
201 201 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
202 202 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
203 203 [1]
204 204
205 205 $ hg init $TESTTMP/client2_pull
206 206 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
207 207 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
208 208 [1]
209 209
210 210 $ hg identify http://localhost:$HGPORT
211 211 1477875038c6
212 212
213 213 --------------------------------------------------------------------------------
214 214 Case #3: client with lfs content and the extension enabled; server with
215 215 non-lfs content, and the extension state controlled by #testcases. The server
216 216 should have an 'lfs' requirement after it picks up its first commit with a blob.
217 217
218 218 $ echo 'this is a big lfs file' > lfs.bin
219 219 $ hg ci -Aqm 'lfs'
220 220 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
221 221 .hg/requires:lfs
222 222
223 223 #if lfsremote-off
224 224 $ hg push -q
225 225 abort: required features are not supported in the destination: lfs
226 226 (enable the lfs extension on the server)
227 227 [255]
228 228 #else
229 229 $ hg push -q
230 230 #endif
231 231 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
232 232 .hg/requires:lfs
233 233 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
234 234
235 235 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
236 236 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
237 237 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
238 238 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
239 239
240 240 $ hg init $TESTTMP/client3_pull
241 241 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
242 242 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
243 243 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
244 244 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
245 245
246 246 Test that the commit/changegroup requirement check hook can be run multiple
247 247 times.
248 248
249 249 $ hg clone -qr 0 http://localhost:$HGPORT $TESTTMP/cmdserve_client3
250 250
251 251 $ cd ../cmdserve_client3
252 252
253 253 >>> from __future__ import absolute_import
254 254 >>> from hgclient import check, readchannel, runcommand
255 255 >>> @check
256 256 ... def addrequirement(server):
257 257 ... readchannel(server)
258 258 ... # change the repo in a way that adds the lfs requirement
259 259 ... runcommand(server, [b'pull', b'-qu'])
260 260 ... # Now cause the requirement adding hook to fire again, without going
261 261 ... # through reposetup() again.
262 262 ... with open('file.txt', 'wb') as fp:
263 263 ... fp.write(b'data')
264 264 ... runcommand(server, [b'ci', b'-Aqm', b'non-lfs'])
265 265 *** runcommand pull -qu
266 266 *** runcommand ci -Aqm non-lfs
267 267
268 268 $ cd ../client
269 269
270 270 The difference here is the push failed above when the extension isn't
271 271 enabled on the server.
272 272 $ hg identify http://localhost:$HGPORT
273 273 8374dc4052cb (lfsremote-on !)
274 274 1477875038c6 (lfsremote-off !)
275 275
276 276 Don't bother testing the lfsremote-off cases- the server won't be able
277 277 to launch if there's lfs content and the extension is disabled.
278 278
279 279 #if lfsremote-on
280 280
281 281 --------------------------------------------------------------------------------
282 282 Case #4: client with non-lfs content and the extension disabled; server with
283 283 lfs content, and the extension enabled.
284 284
285 285 $ cat >> $HGRCPATH <<EOF
286 286 > [extensions]
287 287 > lfs = !
288 288 > EOF
289 289
290 290 $ hg init $TESTTMP/client4
291 291 $ cd $TESTTMP/client4
292 292 $ cat >> .hg/hgrc <<EOF
293 293 > [paths]
294 294 > default = http://localhost:$HGPORT
295 295 > EOF
296 296 $ echo 'non-lfs' > nonlfs2.txt
297 297 $ hg ci -Aqm 'non-lfs'
298 298 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
299 299 $TESTTMP/server/.hg/requires:lfs
300 300
301 301 $ hg push -q --force
302 302 warning: repository is unrelated
303 303 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
304 304 $TESTTMP/server/.hg/requires:lfs
305 305
306 306 $ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
307 307 (remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
308 308 abort: repository requires features unknown to this Mercurial: lfs!
309 309 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
310 310 [255]
311 311 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
312 312 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
313 313 $TESTTMP/server/.hg/requires:lfs
314 314 [2]
315 315
316 316 TODO: fail more gracefully.
317 317
318 318 $ hg init $TESTTMP/client4_pull
319 319 $ hg -R $TESTTMP/client4_pull pull http://localhost:$HGPORT
320 320 pulling from http://localhost:$HGPORT/
321 321 requesting all changes
322 322 remote: abort: no common changegroup version
323 323 abort: pull failed on remote
324 324 [255]
325 325 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
326 326 $TESTTMP/server/.hg/requires:lfs
327 327
328 328 $ hg identify http://localhost:$HGPORT
329 329 03b080fa9d93
330 330
331 331 --------------------------------------------------------------------------------
332 332 Case #5: client with non-lfs content and the extension enabled; server with
333 333 lfs content, and the extension enabled.
334 334
335 335 $ cat >> $HGRCPATH <<EOF
336 336 > [extensions]
337 337 > lfs =
338 338 > EOF
339 339 $ echo 'non-lfs' > nonlfs3.txt
340 340 $ hg ci -Aqm 'non-lfs file with lfs client'
341 341
342 342 $ hg push -q
343 343 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
344 344 $TESTTMP/server/.hg/requires:lfs
345 345
346 346 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
347 347 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
348 348 $TESTTMP/client5_clone/.hg/requires:lfs
349 349 $TESTTMP/server/.hg/requires:lfs
350 350
351 351 $ hg init $TESTTMP/client5_pull
352 352 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
353 353 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
354 354 $TESTTMP/client5_pull/.hg/requires:lfs
355 355 $TESTTMP/server/.hg/requires:lfs
356 356
357 357 $ hg identify http://localhost:$HGPORT
358 358 c729025cc5e3
359 359
360 360 $ mv $HGRCPATH $HGRCPATH.tmp
361 361 $ cp $HGRCPATH.orig $HGRCPATH
362 362
363 363 >>> from __future__ import absolute_import
364 364 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
365 365 >>> @check
366 366 ... def checkflags(server):
367 367 ... readchannel(server)
368 368 ... bprint(b'')
369 369 ... bprint(b'# LFS required- both lfs and non-lfs revlogs have 0x2000 flag')
370 370 ... stdout.flush()
371 371 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
372 372 ... b'../server'])
373 373 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
374 374 ... b'../server'])
375 375 ... runcommand(server, [b'config', b'extensions', b'--cwd',
376 376 ... b'../server'])
377 377 ...
378 378 ... bprint(b"\n# LFS not enabled- revlogs don't have 0x2000 flag")
379 379 ... stdout.flush()
380 380 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
381 381 ... runcommand(server, [b'config', b'extensions'])
382 382
383 383 # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
384 384 *** runcommand debugprocessors lfs.bin -R ../server
385 385 registered processor '0x8000'
386 386 registered processor '0x2000'
387 387 *** runcommand debugprocessors nonlfs2.txt -R ../server
388 388 registered processor '0x8000'
389 389 registered processor '0x2000'
390 390 *** runcommand config extensions --cwd ../server
391 391 extensions.debugprocessors=$TESTTMP/debugprocessors.py
392 392 extensions.lfs=
393 393
394 394 # LFS not enabled- revlogs don't have 0x2000 flag
395 395 *** runcommand debugprocessors nonlfs3.txt
396 396 registered processor '0x8000'
397 397 *** runcommand config extensions
398 398 extensions.debugprocessors=$TESTTMP/debugprocessors.py
399 399
400 400 $ rm $HGRCPATH
401 401 $ mv $HGRCPATH.tmp $HGRCPATH
402 402
403 403 $ hg clone $TESTTMP/client $TESTTMP/nonlfs -qr 0 --config extensions.lfs=
404 404 $ cat >> $TESTTMP/nonlfs/.hg/hgrc <<EOF
405 405 > [extensions]
406 406 > lfs = !
407 407 > EOF
408 408
409 409 >>> from __future__ import absolute_import, print_function
410 410 >>> from hgclient import bprint, check, readchannel, runcommand, stdout
411 411 >>> @check
412 412 ... def checkflags2(server):
413 413 ... readchannel(server)
414 414 ... bprint(b'')
415 415 ... bprint(b'# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag')
416 416 ... stdout.flush()
417 417 ... runcommand(server, [b'debugprocessors', b'lfs.bin', b'-R',
418 418 ... b'../server'])
419 419 ... runcommand(server, [b'debugprocessors', b'nonlfs2.txt', b'-R',
420 420 ... b'../server'])
421 421 ... runcommand(server, [b'config', b'extensions', b'--cwd',
422 422 ... b'../server'])
423 423 ...
424 424 ... bprint(b'\n# LFS enabled without requirement- revlogs have 0x2000 flag')
425 425 ... stdout.flush()
426 426 ... runcommand(server, [b'debugprocessors', b'nonlfs3.txt'])
427 427 ... runcommand(server, [b'config', b'extensions'])
428 428 ...
429 429 ... bprint(b"\n# LFS disabled locally- revlogs don't have 0x2000 flag")
430 430 ... stdout.flush()
431 431 ... runcommand(server, [b'debugprocessors', b'nonlfs.txt', b'-R',
432 432 ... b'../nonlfs'])
433 433 ... runcommand(server, [b'config', b'extensions', b'--cwd',
434 434 ... b'../nonlfs'])
435 435
436 436 # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
437 437 *** runcommand debugprocessors lfs.bin -R ../server
438 438 registered processor '0x8000'
439 439 registered processor '0x2000'
440 440 *** runcommand debugprocessors nonlfs2.txt -R ../server
441 441 registered processor '0x8000'
442 442 registered processor '0x2000'
443 443 *** runcommand config extensions --cwd ../server
444 444 extensions.debugprocessors=$TESTTMP/debugprocessors.py
445 445 extensions.lfs=
446 446
447 447 # LFS enabled without requirement- revlogs have 0x2000 flag
448 448 *** runcommand debugprocessors nonlfs3.txt
449 449 registered processor '0x8000'
450 450 registered processor '0x2000'
451 451 *** runcommand config extensions
452 452 extensions.debugprocessors=$TESTTMP/debugprocessors.py
453 453 extensions.lfs=
454 454
455 455 # LFS disabled locally- revlogs don't have 0x2000 flag
456 456 *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
457 457 registered processor '0x8000'
458 458 *** runcommand config extensions --cwd ../nonlfs
459 459 extensions.debugprocessors=$TESTTMP/debugprocessors.py
460 460 extensions.lfs=!
461 461
462 462 --------------------------------------------------------------------------------
463 463 Case #6: client with lfs content and the extension enabled; server with
464 464 lfs content, and the extension enabled.
465 465
466 466 $ echo 'this is another lfs file' > lfs2.txt
467 467 $ hg ci -Aqm 'lfs file with lfs client'
468 468
469 469 $ hg --config paths.default= push -v http://localhost:$HGPORT
470 470 pushing to http://localhost:$HGPORT/
471 471 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
472 472 searching for changes
473 473 remote has heads on branch 'default' that are not known locally: 8374dc4052cb
474 474 lfs: uploading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
475 475 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
476 476 lfs: uploaded 1 files (25 bytes)
477 477 1 changesets found
478 478 uncompressed size of bundle content:
479 479 206 (changelog)
480 480 172 (manifests)
481 481 275 lfs2.txt
482 482 remote: adding changesets
483 483 remote: adding manifests
484 484 remote: adding file changes
485 485 remote: added 1 changesets with 1 changes to 1 files
486 486 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
487 487 .hg/requires:lfs
488 488 $TESTTMP/server/.hg/requires:lfs
489 489
490 490 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
491 491 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
492 492 $TESTTMP/client6_clone/.hg/requires:lfs
493 493 $TESTTMP/server/.hg/requires:lfs
494 494
495 495 $ hg init $TESTTMP/client6_pull
496 496 $ hg -R $TESTTMP/client6_pull pull -u -v http://localhost:$HGPORT
497 497 pulling from http://localhost:$HGPORT/
498 498 requesting all changes
499 499 adding changesets
500 500 adding manifests
501 501 adding file changes
502 502 added 6 changesets with 5 changes to 5 files (+1 heads)
503 503 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
504 504 new changesets d437e1d24fbd:d3b84d50eacb
505 505 resolving manifests
506 506 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
507 507 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
508 508 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
509 509 lfs: downloaded 1 files (25 bytes)
510 510 getting lfs2.txt
511 511 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
512 512 getting nonlfs2.txt
513 513 getting nonlfs3.txt
514 514 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
515 515 updated to "d3b84d50eacb: lfs file with lfs client"
516 516 1 other heads for branch "default"
517 517 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
518 518 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
519 519 $TESTTMP/client6_pull/.hg/requires:lfs
520 520 $TESTTMP/server/.hg/requires:lfs
521 521
522 522 $ hg identify http://localhost:$HGPORT
523 523 d3b84d50eacb
524 524
525 525 --------------------------------------------------------------------------------
526 526 Misc: process dies early if a requirement exists and the extension is disabled
527 527
528 528 $ hg --config extensions.lfs=! summary
529 529 abort: repository requires features unknown to this Mercurial: lfs!
530 530 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
531 531 [255]
532 532
533 533 $ echo 'this is an lfs file' > $TESTTMP/client6_clone/lfspair1.bin
534 534 $ echo 'this is an lfs file too' > $TESTTMP/client6_clone/lfspair2.bin
535 535 $ hg -R $TESTTMP/client6_clone ci -Aqm 'add lfs pair'
536 536 $ hg -R $TESTTMP/client6_clone push -q
537 537
538 538 $ hg clone -qU http://localhost:$HGPORT $TESTTMP/bulkfetch
539 539
540 Cat doesn't prefetch unless data is needed (e.g. '-T {rawdata}' doesn't need it)
541
542 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{rawdata}\n{path}\n'
543 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
544 version https://git-lfs.github.com/spec/v1
545 oid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
546 size 20
547 x-is-binary 0
548
549 lfspair1.bin
550
551 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T json
552 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
553 [lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
554 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
555 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
556 lfs: downloaded 1 files (20 bytes)
557 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
558
559 {
560 "data": "this is an lfs file\n",
561 "path": "lfspair1.bin",
562 "rawdata": "version https://git-lfs.github.com/spec/v1\noid sha256:cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782\nsize 20\nx-is-binary 0\n"
563 }
564 ]
565
566 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
567
568 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair1.bin -T '{data}\n'
569 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
570 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
571 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
572 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
573 lfs: downloaded 1 files (20 bytes)
574 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
575 this is an lfs file
576
577 $ hg --cwd $TESTTMP/bulkfetch cat -vr tip lfspair2.bin
578 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
579 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
580 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
581 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
582 lfs: downloaded 1 files (24 bytes)
583 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
584 this is an lfs file too
585
540 586 Export will prefetch all needed files across all needed revisions
541 587
588 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
542 589 $ hg -R $TESTTMP/bulkfetch -v export -r 0:tip -o all.export
543 590 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
544 591 exporting patches:
545 592 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
546 593 lfs: need to transfer 4 objects (92 bytes)
547 594 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
548 595 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
549 596 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
550 597 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
551 598 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
552 599 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
553 600 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
554 601 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
555 602 lfs: downloaded 4 files (92 bytes)
556 603 all.export
557 604 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
558 605 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
559 606 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
560 607 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
561 608
562 609 Export with selected files is used with `extdiff --patch`
563 610
564 611 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
565 612 $ hg --config extensions.extdiff= \
566 613 > -R $TESTTMP/bulkfetch -v extdiff -r 2:tip --patch $TESTTMP/bulkfetch/lfs.bin
567 614 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
568 615 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
569 616 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
570 617 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
571 618 lfs: downloaded 1 files (23 bytes)
572 619 */hg-8374dc4052cb.patch (glob)
573 620 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
574 621 */hg-9640b57e77b1.patch (glob)
575 622 --- */hg-8374dc4052cb.patch * (glob)
576 623 +++ */hg-9640b57e77b1.patch * (glob)
577 624 @@ -2,12 +2,7 @@
578 625 # User test
579 626 # Date 0 0
580 627 # Thu Jan 01 00:00:00 1970 +0000
581 628 -# Node ID 8374dc4052cbd388e79d9dc4ddb29784097aa354
582 629 -# Parent 1477875038c60152e391238920a16381c627b487
583 630 -lfs
584 631 +# Node ID 9640b57e77b14c3a0144fb4478b6cc13e13ea0d1
585 632 +# Parent d3b84d50eacbd56638e11abce6b8616aaba54420
586 633 +add lfs pair
587 634
588 635 -diff -r 1477875038c6 -r 8374dc4052cb lfs.bin
589 636 ---- /dev/null Thu Jan 01 00:00:00 1970 +0000
590 637 -+++ b/lfs.bin Thu Jan 01 00:00:00 1970 +0000
591 638 -@@ -0,0 +1,1 @@
592 639 -+this is a big lfs file
593 640 cleaning up temp directory
594 641 [1]
595 642
596 643 Diff will prefetch files
597 644
598 645 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
599 646 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip
600 647 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
601 648 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
602 649 lfs: need to transfer 4 objects (92 bytes)
603 650 lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
604 651 lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
605 652 lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
606 653 lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
607 654 lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
608 655 lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
609 656 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
610 657 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
611 658 lfs: downloaded 4 files (92 bytes)
612 659 lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
613 660 lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
614 661 lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
615 662 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
616 663 diff -r 8374dc4052cb -r 9640b57e77b1 lfs.bin
617 664 --- a/lfs.bin Thu Jan 01 00:00:00 1970 +0000
618 665 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
619 666 @@ -1,1 +0,0 @@
620 667 -this is a big lfs file
621 668 diff -r 8374dc4052cb -r 9640b57e77b1 lfs2.txt
622 669 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
623 670 +++ b/lfs2.txt Thu Jan 01 00:00:00 1970 +0000
624 671 @@ -0,0 +1,1 @@
625 672 +this is another lfs file
626 673 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair1.bin
627 674 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
628 675 +++ b/lfspair1.bin Thu Jan 01 00:00:00 1970 +0000
629 676 @@ -0,0 +1,1 @@
630 677 +this is an lfs file
631 678 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
632 679 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
633 680 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
634 681 @@ -0,0 +1,1 @@
635 682 +this is an lfs file too
636 683 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs.txt
637 684 --- a/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
638 685 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
639 686 @@ -1,1 +0,0 @@
640 687 -non-lfs
641 688 diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs3.txt
642 689 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
643 690 +++ b/nonlfs3.txt Thu Jan 01 00:00:00 1970 +0000
644 691 @@ -0,0 +1,1 @@
645 692 +non-lfs
646 693
647 694 Only the files required by diff are prefetched
648 695
649 696 $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
650 697 $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip $TESTTMP/bulkfetch/lfspair2.bin
651 698 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
652 699 lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
653 700 lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
654 701 lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
655 702 lfs: downloaded 1 files (24 bytes)
656 703 lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
657 704 diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
658 705 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
659 706 +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
660 707 @@ -0,0 +1,1 @@
661 708 +this is an lfs file too
662 709
663 710 #endif
664 711
665 712 $ "$PYTHON" $TESTDIR/killdaemons.py $DAEMON_PIDS
666 713
667 714 $ cat $TESTTMP/errors.log
General Comments 0
You need to be logged in to leave comments. Login now