##// END OF EJS Templates
split: handle partial commit of renames when doing split or record (issue5723)...
Kyle Lippincott -
r43122:3cf09184 default
parent child Browse files
Show More
@@ -1,3427 +1,3439 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy as copymod
11 11 import errno
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 bookmarks,
25 25 changelog,
26 26 copies,
27 27 crecord as crecordmod,
28 28 dirstateguard,
29 29 encoding,
30 30 error,
31 31 formatter,
32 32 logcmdutil,
33 33 match as matchmod,
34 34 merge as mergemod,
35 35 mergeutil,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 pycompat,
41 41 repair,
42 42 revlog,
43 43 rewriteutil,
44 44 scmutil,
45 45 smartset,
46 46 state as statemod,
47 47 subrepoutil,
48 48 templatekw,
49 49 templater,
50 50 util,
51 51 vfs as vfsmod,
52 52 )
53 53
54 54 from .utils import (
55 55 dateutil,
56 56 stringutil,
57 57 )
58 58
# Short alias used throughout this module for an in-memory byte stream.
stringio = util.stringio

# templates of common command options

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
    ]

confirmopts = [
    ('', 'confirm', None,
     _('ask before applying actions')),
    ]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
    ]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
    ]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
    ]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
    ]

formatteropts = [
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
    ]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
    ]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
    ] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
    ]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
    ]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
    ] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
    ]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool'), _('TOOL')),
    ]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
    ]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
    ]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
    ]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
177 177
def ishunk(x):
    """Return True if x is a hunk object from record or crecord."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
181 181
def newandmodified(chunks, originalchunks):
    """Return files touched by hunks that were newly added during recording.

    ``chunks`` is the list of user-selected hunks; ``originalchunks`` is the
    unfiltered list.  A hunk that is for a new file and was not present in
    ``originalchunks`` was created interactively (e.g. by splitting), so the
    file needs a backup because the working directory holds no pristine copy.

    Returns a pair of sets:
      - newlyaddedandmodifiedfiles: targets of such new-file hunks
      - alsorestore: other files referenced by those hunks' headers (the
        sources of renames/copies), which must also be restored so that
        applying the filtered patch works (issue5723)
    """
    newlyaddedandmodifiedfiles = set()
    alsorestore = set()
    for chunk in chunks:
        if (ishunk(chunk) and chunk.header.isnewfile() and chunk not in
            originalchunks):
            newlyaddedandmodifiedfiles.add(chunk.header.filename())
            # rename/copy sources appear in files() but not filename()
            alsorestore.update(set(chunk.header.files()) -
                               set([chunk.header.filename()]))
    return newlyaddedandmodifiedfiles, alsorestore
189 192
def parsealiases(cmd):
    """Split a command-table key such as "log|history" into its aliases."""
    aliases = cmd.split("|")
    return aliases
192 195
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it afterwards.
    """
    oldwrite = ui.write

    def colorwrite(*args, **kwargs):
        label = kwargs.pop(r'label', '')
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + l)

    setattr(ui, 'write', colorwrite)
    return oldwrite
205 208
def filterchunks(ui, originalhunks, usecurses, testfile, match,
                 operation=None):
    """Run hunk selection, via curses when enabled, else the text prompt.

    On a curses fallback error we warn and drop down to text mode.
    """
    if usecurses:
        try:
            if testfile:
                # test mode: replay a scripted chunk selection
                recordfn = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector)
            else:
                recordfn = crecordmod.chunkselector
            return crecordmod.filterpatch(ui, originalhunks, recordfn,
                                          operation)
        except crecordmod.fallbackerror as e:
            ui.warn('%s\n' % e.message)
            ui.warn(_('falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
223 226
def recordfilter(ui, originalhunks, match, operation=None):
    """Prompt the user to filter originalhunks; return (chunks, newopts).

    *operation* names the kind of filtering being done (reverting,
    committing, shelving, ...) and is used to build ui messages
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    # colorize diff output while the interactive session runs
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile,
                            match, operation)
    finally:
        ui.write = oldwrite
240 243
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via commitfunc.

    cmdsuggest is the command name to suggest when the ui is not
    interactive; backupall forces backing up every changed file rather
    than only the ones needing it; filterfn performs the hunk selection
    (usually recordfilter).
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get('interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match = matchmod.badmatch(match, fail)
            match.explicitdir = vdirs.append

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status, so work on a copy
            tmpstatus = scmutil.status(copymod.copy(status[0]),
                                       copymod.copy(status[1]),
                                       copymod.copy(status[2]),
                                       copymod.copy(status[3]),
                                       copymod.copy(status[4]),
                                       copymod.copy(status[5]),
                                       copymod.copy(status[6]))

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True)
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True,
                                         section='commands',
                                         configprefix='commit.interactive.')
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks, match)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks,
                                                                 originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers without files() (e.g. plain hunks) contribute nothing
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert; also restore rename sources so
                # that the filtered patch applies cleanly (issue5723)
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.update(repo, repo.dirstate.p1(), branchmerge=False,
                                force=True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; leftovers are harmless
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
446 452
class dirnode(object):
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct child of this directory

    subdirs is a dictionary of sub-directory name as the key and it's own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        # repo-relative path of this directory, without trailing '/'
        self.path = dirpath
        # single-letter status codes seen anywhere under this directory
        self.statuses = set()
        # (filename, statuschar) pairs for direct children only
        self.files = []
        # subdirectory name -> dirnode
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not direct child of this directory, we traverse to the
        directory of which this file is a direct child of and add the file
        there.
        """

        # the filename contains a path separator, it means it's not the direct
        # child of this directory
        if '/' in filename:
            subdir, filep = filename.split('/', 1)

            # does the dirnode object for subdir exists
            if subdir not in self.subdirs:
                subdirpath = pathutil.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        # record the status on every dirnode along the path, so ancestors
        # know the full set of statuses beneath them
        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, pathutil.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to terse
        that status. -> yield (status, dirpath). dirpath will end in '/'.

        2) Otherwise, we do following:

                a) Yield (status, filepath)  for all the files which are in this
                    directory (only the ones in this directory, not the subdirs)

                b) Recurse the function on all the subdirectories of this
                   directory
        """

        if len(self.statuses) == 1:
            # NOTE: pop() consumes the sole status from self.statuses; the
            # walk is expected to visit each dirnode only once
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + '/'
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        #recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
546 552
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.

    Returns a list of file lists, one per status, in the order of ``allst``.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # checking the argument validity
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_("'%s' not recognized") % s)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        # map each status attribute name to its one-letter code
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # we won't be tersing the root dir, so add files in it
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory and build tersedict
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    tersedlist = []
    for st in allst:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])

    return tersedlist
595 601
596 602 def _commentlines(raw):
597 603 '''Surround lineswith a comment char and a new line'''
598 604 lines = raw.splitlines()
599 605 commentedlines = ['# %s' % line for line in lines]
600 606 return '\n'.join(commentedlines) + '\n'
601 607
def _conflictsmsg(repo):
    """Return a commented message describing unresolved merge conflicts.

    Returns None when no merge is in progress.
    """
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    # only report conflicts visible under the current narrowmatcher
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if unresolvedlist:
        mergeliststr = '\n'.join(
            ['    %s' % util.pathto(repo.root, encoding.getcwd(), path)
             for path in sorted(unresolvedlist)])
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved:  hg resolve --mark FILE''') % mergeliststr
    else:
        msg = _('No unresolved merge conflicts.')

    return _commentlines(msg)
622 628
def morestatus(repo, fm):
    """Emit extra status output about unfinished states and conflicts.

    fm is a formatter; output is labeled 'status.morestatus'.
    """
    statetuple = statemod.getrepostate(repo)
    label = 'status.morestatus'
    if statetuple:
        state, helpfulmsg = statetuple
        statemsg = _('The repository is in an unfinished *%s* state.') % state
        fm.plain('%s\n' % _commentlines(statemsg), label=label)
        conmsg = _conflictsmsg(repo)
        if conmsg:
            fm.plain('%s\n' % conmsg, label=label)
        if helpfulmsg:
            fm.plain('%s\n' % _commentlines(helpfulmsg), label=label)
635 641
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    choice = {}
    debugchoice = {}
    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        else:
            found = None
            if not strict:
                # prefix match against every alias, first hit wins
                for alias in aliases:
                    if alias.startswith(cmd):
                        found = alias
                        break
        if found is None:
            continue
        isdebug = aliases[0].startswith("debug") or found.startswith("debug")
        (debugchoice if isdebug else choice)[found] = (aliases, table[entry])

    if debugchoice and not choice:
        choice = debugchoice

    return choice, allcmds
673 679
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # several prefixes matched; the caller must disambiguate
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
689 695
def changebranch(ui, repo, revs, label):
    """ Change the branch name of given revs to label

    Rewrites each changeset in revs (which must be a linear, topologically
    head-complete, non-public stack) with the new branch name, creates
    obsmarkers for the old nodes, and moves the working copy if it was on a
    rewritten changeset.
    """

    with repo.wlock(), repo.lock(), repo.transaction('branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort("empty revision set")
        roots = repo.revs('roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(_("cannot change branch of non-linear revisions"))
        rewriteutil.precheck(repo, revs, 'change branch of')

        root = repo[roots.first()]
        rpb = {parent.branch() for parent in root.parents()}
        # reusing an existing branch name is only allowed when extending it
        if label not in rpb and label in repo.branchmap():
            raise error.Abort(_("a branch of the same name already exists"))

        if repo.revs('obsolete() and %ld', revs):
            raise error.Abort(_("cannot change branch of a obsolete changeset"))

        # make sure only topological heads
        if repo.revs('heads(%ld) - head()', revs):
            raise error.Abort(_("cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context
        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                # files removed in ctx simply vanish from the rewrite
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug("changing branch of '%s' from '%s' to '%s'\n"
                     % (hex(ctx.node()), oldbranch, label))
            extra = ctx.extra()
            extra['branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(repo, (p1, p2),
                                ctx.description(),
                                ctx.files(),
                                filectxfn,
                                user=ctx.user(),
                                date=ctx.date(),
                                extra=extra,
                                branch=label)

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug('new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg
                hg.update(repo, newid[0], quietempty=True)

        ui.status(_("changed branch on %d changesets\n") % len(replacements))
775 781
def findrepo(p):
    """Walk upwards from p looking for a directory containing '.hg'.

    Returns the repository root path, or None when the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            return None
        p = parent
    return p
783 789
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.

    Raises error.Abort when there is an uncommitted merge (if merge=True),
    uncommitted changes, or a dirty subrepository.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # only the first four status fields matter for "dirty" here
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise error.Abort(_('uncommitted changes'), hint=hint)
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)
801 807
def logmessage(ui, opts):
    """ get the log message according to -m and -l option

    Aborts when both are given; reads from stdin when the logfile is '-'.
    Returns the message, or None when neither option is set.
    """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if not message and logfile:
        try:
            if isstdiofilename(logfile):
                message = ui.fin.read()
            else:
                # normalize line endings while preserving content
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))
    return message
820 826
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
837 843
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def editor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return editor
    if editform:
        def editor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return editor
    return commiteditor
868 874
def _escapecommandtemplate(tmpl):
    """Escape the literal string pieces of tmpl, leaving template syntax as-is."""
    out = []
    for typ, start, end in templater.scantemplate(tmpl, raw=True):
        piece = tmpl[start:end]
        if typ == b'string':
            piece = stringutil.escapestr(piece)
        out.append(piece)
    return b''.join(out)
877 883
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    # empty template renders as itself; avoids building a templater
    if not tmpl:
        return tmpl
    t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return t.renderdefault(props)
895 901
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    tres = formatter.templateresources(repo.ui, repo)
    t = formatter.maketemplater(repo.ui, tmpl, defaults=templatekw.keywords,
                                resources=tres)
    mapping = {'ctx': ctx}
    if props:
        mapping.update(props)
    return t.renderdefault(mapping)
910 916
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # mapping of %-spec character -> replacement template fragment
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # some specs are only valid when the corresponding value is supplied
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        # template expressions are kept verbatim; only literal string
        # segments may contain %-specs
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more %-specs; escape the remainder of the literal
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # a trailing lone '%' cannot form a spec
                raise error.Abort(_("incomplete format spec in output "
                                    "filename"))
            c = pat[n + 1:n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(_("invalid format spec '%%%s' in output "
                                    "filename") % c)
    return ''.join(newname)
978 984
def makefilename(ctx, pat, **props):
    """Expand the old-style %-format string 'pat' against changeset 'ctx'."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, _buildfntemplate(pat, **props),
                          pycompat.byteskwargs(props))
987 993
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == '-'
991 997
class _unclosablefile(object):
    """Proxy around a file object whose close() is a no-op.

    Used to hand out shared streams (e.g. ui.fout/ui.fin) to callers that
    expect to be able to close or use them as context managers without
    actually closing the underlying stream.
    """

    def __init__(self, fp):
        self._fp = fp

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # intentionally do not close the wrapped stream
        pass

    def close(self):
        # deliberately a no-op: the underlying stream is shared
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # everything else is forwarded to the wrapped file object
        return getattr(self._fp, attr)
1010 1016
def makefileobj(ctx, pat, mode='wb', **props):
    """Open a file object for 'pat'; stdio names map to the ui streams."""
    if isstdiofilename(pat):
        # hand out the ui's stdio stream, wrapped so close() is a no-op
        ui = ctx.repo().ui
        writable = mode not in ('r', 'rb')
        return _unclosablefile(ui.fout if writable else ui.fin)
    return open(makefilename(ctx, pat, **props), mode)
1023 1029
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    # NOTE: 'dir' shadows the builtin; kept to match the option name
    dir = opts['dir']
    msg = None
    # validate mutually-exclusive option combinations first
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    # pick the requested storage object from the repository, if any
    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            if not dir.endswith('/'):
                dir = dir + '/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, '_revlog'):
            r = r._revlog
        elif r is not None:
            raise error.Abort(_('%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_('cannot give path to non-revlog'))

        # fall back to opening a plain revlog file directly from disk
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
1084 1090
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    This is similar to ``openstorage()`` except it always returns a revlog.

    In most cases, a caller cares about the main storage object - not the
    revlog backing it. Therefore, this function should only be used by code
    that needs to examine low-level revlog implementation details. e.g. debug
    commands.

    Raises error.Abort (via openstorage) when the selected storage is not
    backed by a revlog.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1096 1102
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or rename, when 'rename' is True) working-directory files.

    'pats' holds the source patterns followed by the destination as its
    last element.  Returns True if at least one copy/rename failed.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    def walkpat(pat):
        # collect (abs, rel, exact) triples of copyable sources for 'pat',
        # warning about unmanaged/removed files matched exactly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = uipathfn(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/rename; returns True on failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return True # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return True # report a failure
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _("('hg rename %s' to replace the file by "
                                 'recording a rename)\n') % flags
                    else:
                        hint = _("('hg copy %s' to replace the file by "
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _("('hg rename --after' to record the rename)\n")
                    else:
                        hint = _("('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return True # report a failure
        elif not dryrun:
            # actually move/copy the bytes on disk
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist at dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    # main body: validate arguments, then copy every matched source
    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    # copyfile() returns True on failure
    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0
1343 1349
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1364 1370
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag) tuple.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get('filename')
    message = patchdata.get('message')
    user = opts.get('user') or patchdata.get('user')
    date = opts.get('date') or patchdata.get('date')
    branch = patchdata.get('branch')
    nodeid = patchdata.get('nodeid')
    p1 = patchdata.get('p1')
    p2 = patchdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)

    if not tmpname:
        # nothing extracted from the patch: nothing to do
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug('message:\n%s\n' % (message or ''))

    # resolve the parents the new commit should have
    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get('exact'):
        if not nodeid or not p1:
            raise error.Abort(_('not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(_("warning: import the patch as a normal revision\n"
                      "(use --exact to import the patch as a merge)\n"))
    else:
        p1, p2 = parents

    n = None
    if update:
        # apply the patch to the working directory (no --bypass)
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get('exact') or importbranch:
            repo.dirstate.setbranch(branch or 'default')

        partial = opts.get('partial', False)
        files = set()
        try:
            patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                        files=files, eolmode=None, similarity=sim / 100.0)
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get('exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], 'import.normal')
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=editform,
                                         **pycompat.strkwargs(opts))
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[('ui', 'allowemptycommit')] = True
            with repo.ui.configoverride(overrides, 'import'):
                n = repo.commit(message, user,
                                date, match=m,
                                editor=editor, extra=extra)
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: apply the patch in-memory via a filestore + memctx
        if opts.get('exact') or importbranch:
            branch = branch or 'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                files, eolmode=None)
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform='import.bypass')
            memctx = context.memctx(repo, (p1.node(), p2.node()),
                                    message,
                                    files=files,
                                    filectxfn=store,
                                    user=user,
                                    date=date,
                                    branch=branch,
                                    editor=editor)
            n = memctx.commit()
        finally:
            store.close()
    if opts.get('exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_("warning: can't check exact import with --no-commit\n"))
    elif opts.get('exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_('patch is damaged or loses information'))
    msg = _('applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _('created %s') % short(n)
    return msg, n, rejects
1534 1540
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1542 1548
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Write one changeset as an "HG changeset patch" to formatter 'fm'.

    'seqno' is the 1-based position of this patch in the exported series
    (passed to the extraexport hooks).  When 'switch_parent' is set, the
    diff is taken against the second parent instead of the first.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    # emit the patch header lines in their canonical order
    fm.context(ctx=ctx)
    fm.plain('# HG changeset patch\n')
    fm.write('user', '# User %s\n', ctx.user())
    fm.plain('# Date %d %d\n' % ctx.date())
    fm.write('date', '# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(branch and branch != 'default',
                 'branch', '# Branch %s\n', branch)
    fm.write('node', '# Node ID %s\n', hex(node))
    fm.plain('# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain('# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name='node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain('# %s\n' % header)

    fm.write('desc', '%s\n', ctx.description().rstrip())
    fm.plain('\n')

    if fm.isplain():
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
1585 1591
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    # '<...>' destinations (e.g. '<unnamed>') are placeholders, not real
    # file names, so they are not echoed
    isrealfile = not dest.startswith('<')
    for idx, rev in enumerate(revs, 1):
        if isrealfile:
            repo.ui.note("%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, repo[rev], fm, match, switch_parent, idx, diffopts)
1594 1600
def _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, diffopts,
                      match):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    # group revisions by rendered destination file name, preserving the
    # order in which each file name is first produced
    filemap = util.sortdict() # filename: [(seqno, rev), ...]
    for seqno, rev in enumerate(revs, 1):
        dest = makefilename(repo[rev], fntemplate,
                            total=total, seqno=seqno, revwidth=revwidth)
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note("%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                _exportsingle(repo, repo[rev], fm, match, switch_parent,
                              seqno, diffopts)
1616 1622
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch every file touched by 'revs' (filtered by 'match').

    'match' may be None, in which case all changed files are prefetched.
    """
    # 'fname' rather than 'file': don't shadow the builtin
    allfiles = set()
    for rev in revs:
        for fname in repo[rev].files():
            if not match or match(fname):
                allfiles.add(fname)
    scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
1624 1630
def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    '''
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, opts,
                          match)
    else:
        _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts,
                    match)
1656 1662
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    _prefetchchangedfiles(repo, revs, match)

    # use the stream's name when it has one, else a placeholder
    destname = getattr(fp, 'name', '<unnamed>')
    with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
        _exportfile(repo, revs, fm, destname, switch_parent, opts, match)
1664 1670
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date is already shown separately above, so drop it from the
    # metadata dict before rendering it
    meta = marker.metadata().copy()
    meta.pop('date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1686 1692
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    matchfn = dateutil.matchdate(date)
    matched = {} # rev -> date tuple of changesets matching the spec

    def prep(ctx, fns):
        when = ctx.date()
        if matchfn(when[0]):
            matched[ctx.rev()] = when

    # walk all changesets, remembering matches, and report the first
    # (tipmost) matching revision seen
    for ctx in walkchangerevs(repo, scmutil.matchall(repo), {'rev': None},
                              prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, dateutil.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
1707 1713
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double until 'sizelimit' is reached.

    Once the size reaches (or starts at/above) the limit, the same size
    is yielded forever.
    """
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1713 1719
def _walkrevs(repo, opts):
    """Return the revisions to walk, per the --rev/--follow options."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if follow and repo.dirstate.p1() == nullid:
        # --follow with a null working-directory parent: nothing to walk
        return smartset.baseset()
    if follow:
        return repo.revs('reverse(:.)')
    revs = smartset.spanset(repo)
    revs.reverse()
    return revs
1728 1734
# Raised by walkfilerevs() when the file history cannot be walked using
# filelogs alone (callers fall back to the slower changelog-based walk).
class FileWalkError(Exception):
    pass
1731 1737
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    As a side effect, fncache is filled with a mapping of
    rev -> [matched filenames changed in that rev].

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []  # (oldname, node) pairs discovered while following renames
    minrev, maxrev = min(revs), max(revs)
    def filerevs(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for every explicit file, then
        # for every rename source discovered while walking (appended to
        # 'copies' by the loop below)
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1828 1834
class _followfilter(object):
    """Stateful filter deciding whether revisions lie on a followed line.

    match() must be fed one revision at a time; the first revision seen
    becomes the anchor (startrev).  Subsequent calls report whether the
    given rev is connected to the anchor: revs numerically above the anchor
    are matched as descendants, revs below it as ancestors (via parents).
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev   # anchor rev; set by the first match() call
        self.roots = set()        # frontier of revs known to be connected
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parent revs, restricted to the first parent when onlyfirst
            # is set, with null parents filtered out otherwise
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # first revision seen becomes the anchor and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1866 1872
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    allfiles = opts.get('all_files')
    follow = opts.get('follow') or opts.get('follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    # 'wanted' ends up as either a smartset (all revs), a plain set (fast
    # filelog path), or a lazywantedset (slow path below)
    wanted = set()
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    fncache = {}  # rev -> list of matched filenames changed in that rev
    change = repo.__getitem__

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always() or allfiles:
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    if allfiles:
                        matches = list(ctx.manifest().walk(match))
                    else:
                        matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in pycompat.xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # gather data forwards (ascending) within the window...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        if allfiles:
                            fiter = iter(ctx)
                        else:
                            fiter = ctx.files()
                        for f in fiter:
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then yield in the walk order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2011 2017
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Add matched files (and matching subrepo files) to the working context.

    When explicitonly is true, only files named exactly by the match are
    added.  Returns the list of files that could not be added.
    """
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % uipathfn(f),
                          label='ui.addremove.added')

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            # without --subrepos, recurse with explicitonly=True so only
            # explicitly-named subrepo files are added
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, False,
                                   **opts))
            else:
                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, True,
                                   **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % uipathfn(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2059 2065
def addwebdirpath(repo, serverpath, webconf):
    """Register repo under serverpath in the hgweb config dict webconf.

    Also recurses into every subrepository ever recorded in .hgsub so
    they get registered too.
    """
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    for r in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[r]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2068 2074
def forget(ui, repo, match, prefix, uipathfn, explicitonly, dryrun,
           interactive):
    """Forget (untrack) matched files, recursing into subrepos.

    Returns a pair (bad, forgot): files that could not be forgotten and
    files that actually were forgotten.
    """
    if dryrun and interactive:
        raise error.Abort(_("cannot specify both --dry-run and --interactive"))
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(submatch, subprefix, subuipathfn,
                                           dryrun=dryrun,
                                           interactive=interactive)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % uipathfn(subpath))

    if not explicitonly:
        # warn about explicitly-named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % uipathfn(f))
                    bad.append(f)

    if interactive:
        responses = _('[Ynsa?]'
                      '$$ &Yes, forget this file'
                      '$$ &No, skip this file'
                      '$$ &Skip remaining files'
                      '$$ Include &all remaining files'
                      '$$ &? (display help)')
        for filename in forget[:]:
            r = ui.promptchoice(_('forget %s %s') %
                                (uipathfn(filename), responses))
            if r == 4: # ?
                # show help, then re-prompt until a real answer is given
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write('%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(_('forget %s %s') %
                                        (uipathfn(filename), responses))
            if r == 0: # yes
                continue
            elif r == 1: # no
                forget.remove(filename)
            elif r == 2: # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3: # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(_('removing %s\n') % uipathfn(f),
                      label='ui.addremove.removed')

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2151 2157
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """Emit one formatter item per file in ctx matching m.

    Recurses into subrepositories when requested.  Returns 0 if at least
    one file matched, 1 otherwise.
    """
    ret = 1

    # size/flags need a filectx; only pay for it when the output wants them
    needsfctx = ui.verbose or {'size', 'flags'} & fm.datahint()
    for f in ctx.matches(m):
        fm.startitem()
        fm.context(ctx=ctx)
        if needsfctx:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(path=f)
        fm.plain(fmt % uipathfn(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, subuipathfn, fm, fmt,
                                  recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % uipathfn(subpath))

    return ret
2181 2187
def remove(ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun,
           warnings=None):
    """Remove matched files from tracking (and disk, unless --after).

    When a 'warnings' list is passed in (subrepo recursion), warnings are
    appended to it instead of being printed here.  Returns 0 on success,
    1 if any warning was issued.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True   # top-level call: we print the warnings at the end
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(_('searching'), total=len(subs),
                               unit=_('subrepos'))
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, subprefix, subuipathfn, after,
                                   force, subrepos, dryrun, warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % uipathfn(subpath))
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(_('deleting'), total=len(files),
                               unit=_('files'))
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % uipathfn(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % uipathfn(f))
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # select the files to actually remove, depending on --force/--after
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(_('skipping'), total=len(remaining),
                                   unit=_('files'))
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % uipathfn(f))
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(_('skipping'),
                                   total=(len(modified) + len(added)),
                                   unit=_('files'))
        for f in modified:
            progress.increment()
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % uipathfn(f))
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % uipathfn(f))
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(_('deleting'), total=len(list),
                               unit=_('files'))
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(_('removing %s\n') % uipathfn(f),
                      label='ui.addremove.removed')
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue # we never unlink added files on remove
                    rmdir = repo.ui.configbool('experimental',
                                               'removeemptydirs')
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2301 2307
2302 2308 def _catfmtneedsdata(fm):
2303 2309 return not fm.datahint() or 'data' in fm.datahint()
2304 2310
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first.

    ``matcher`` is unused in this base implementation; it is part of the
    hook signature available to wrappers.
    """

    # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
    # wasn't requested.
    data = b''
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            data = ctx.repo().wwritedata(path, data)
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write('data', '%s', data)
    fm.data(path=path)
2322 2328
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Output file contents at ctx for files matching matcher (``hg cat``).

    When fntemplate is set, each file is written to a filename generated
    from the template instead of through the base formatter.  Recurses into
    subrepositories.  Returns 0 if any file matched, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(ctx, fntemplate,
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
                write(file)
                return 0
        except KeyError:
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [ctx.rev()], matcher)

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            if not sub.cat(submatch, basefm, fntemplate, subprefix,
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n") %
                      uipathfn(subpath))

    return err
2376 2382
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Handles the shared pre-commit work (date parsing, commit message,
    matcher construction, optional --addremove under a dirstate guard) and
    delegates the actual commit to commitfunc.
    '''
    date = opts.get('date')
    if date:
        opts['date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, "", uipathfn, opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)
2399 2405
def samefile(f, ctx1, ctx2):
    """Report whether file f is identical (content and flags) in both contexts.

    A file absent from both manifests also counts as "the same".
    """
    present1 = f in ctx1.manifest()
    present2 = f in ctx2.manifest()
    if not present1:
        # same only when the file is absent on both sides
        return not present2
    if not present2:
        return False
    fctx1 = ctx1.filectx(f)
    fctx2 = ctx2.filectx(f)
    return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
2411 2417
def amend(ui, repo, old, extra, pats, opts):
    """Amend changeset *old* with working-copy changes matching pats/opts.

    Commits a replacement changeset on top of old's first parent that
    combines old's changes with the matched working-copy changes, reroutes
    the working directory parent to it, obsoletes/strips the old node, and
    fixes up the dirstate for the amended files.  Returns the new node, or
    ``old.node()`` when the amend would change nothing.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction('amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        user = opts.get('user') or old.user()

        datemaydiffer = False  # date-only change should be ignored?
        if opts.get('date') and opts.get('currentdate'):
            raise error.Abort(_('--date and --currentdate are mutually '
                                'exclusive'))
        if opts.get('date'):
            date = dateutil.parsedate(opts.get('date'))
        elif opts.get('currentdate'):
            date = dateutil.makedate()
        elif (ui.configbool('rewrite', 'update-timestamp')
              and opts.get('currentdate') is None):
            # config-driven timestamp refresh: a resulting date-only
            # difference must not count as "something changed" below
            date = dateutil.makedate()
            datemaydiffer = True
        else:
            date = old.date()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            files = {fn for st in base.status(old)[:3] for fn in st}
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if (opts.get('addremove')
            and scmutil.addremove(repo, matcher, "", uipathfn, opts)):
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if '.hgsub' in wctx or '.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher)
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        filestoamend = set(f for f in wctx.files() if matcher(f))

        changes = bool(filestoamend)
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # Note: p2 is a method, so a bare 'old.p2' would always be
            # truthy; call it and compare against the null revision to
            # detect a real second parent (merge changeset being amended).
            if old.p2().rev() != nullrev:
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [f for f in files if (f not in filestoamend
                                          or not samefile(f, wctx, base))]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(repo, ctx_,
                                              fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copysource=copied.get(path))
                    return mctx
                except KeyError:
                    return None
        else:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, 'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get('edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get('edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get('secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get('note'):
            obsmetadata = {'note': encoding.fromlocal(opts['note'])}
        backup = ui.configbool('rewrite', 'backup-bundle')
        scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
                             fixphase=True, targetphase=commitphase,
                             backup=backup)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and
        # and modified in the amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
2624 2630
2625 2631 def commiteditor(repo, ctx, subs, editform=''):
2626 2632 if ctx.description():
2627 2633 return ctx.description()
2628 2634 return commitforceeditor(repo, ctx, subs, editform=editform,
2629 2635 unchangedmessagedetection=True)
2630 2636
2631 2637 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2632 2638 editform='', unchangedmessagedetection=False):
2633 2639 if not extramsg:
2634 2640 extramsg = _("Leave message empty to abort commit.")
2635 2641
2636 2642 forms = [e for e in editform.split('.') if e]
2637 2643 forms.insert(0, 'changeset')
2638 2644 templatetext = None
2639 2645 while forms:
2640 2646 ref = '.'.join(forms)
2641 2647 if repo.ui.config('committemplate', ref):
2642 2648 templatetext = committext = buildcommittemplate(
2643 2649 repo, ctx, subs, extramsg, ref)
2644 2650 break
2645 2651 forms.pop()
2646 2652 else:
2647 2653 committext = buildcommittext(repo, ctx, subs, extramsg)
2648 2654
2649 2655 # run editor in the repository root
2650 2656 olddir = encoding.getcwd()
2651 2657 os.chdir(repo.root)
2652 2658
2653 2659 # make in-memory changes visible to external process
2654 2660 tr = repo.currenttransaction()
2655 2661 repo.dirstate.write(tr)
2656 2662 pending = tr and tr.writepending() and repo.root
2657 2663
2658 2664 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2659 2665 editform=editform, pending=pending,
2660 2666 repopath=repo.path, action='commit')
2661 2667 text = editortext
2662 2668
2663 2669 # strip away anything below this special string (used for editors that want
2664 2670 # to display the diff)
2665 2671 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
2666 2672 if stripbelow:
2667 2673 text = text[:stripbelow.start()]
2668 2674
2669 2675 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2670 2676 os.chdir(olddir)
2671 2677
2672 2678 if finishdesc:
2673 2679 text = finishdesc(text)
2674 2680 if not text.strip():
2675 2681 raise error.Abort(_("empty commit message"))
2676 2682 if unchangedmessagedetection and editortext == templatetext:
2677 2683 raise error.Abort(_("commit message unchanged"))
2678 2684
2679 2685 return text
2680 2686
2681 2687 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
2682 2688 ui = repo.ui
2683 2689 spec = formatter.templatespec(ref, None, None)
2684 2690 t = logcmdutil.changesettemplater(ui, repo, spec)
2685 2691 t.t.cache.update((k, templater.unquotestring(v))
2686 2692 for k, v in repo.ui.configitems('committemplate'))
2687 2693
2688 2694 if not extramsg:
2689 2695 extramsg = '' # ensure that extramsg is string
2690 2696
2691 2697 ui.pushbuffer()
2692 2698 t.show(ctx, extramsg=extramsg)
2693 2699 return ui.popbuffer()
2694 2700
2695 2701 def hgprefix(msg):
2696 2702 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
2697 2703
2698 2704 def buildcommittext(repo, ctx, subs, extramsg):
2699 2705 edittext = []
2700 2706 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2701 2707 if ctx.description():
2702 2708 edittext.append(ctx.description())
2703 2709 edittext.append("")
2704 2710 edittext.append("") # Empty line between message and comments.
2705 2711 edittext.append(hgprefix(_("Enter commit message."
2706 2712 " Lines beginning with 'HG:' are removed.")))
2707 2713 edittext.append(hgprefix(extramsg))
2708 2714 edittext.append("HG: --")
2709 2715 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2710 2716 if ctx.p2():
2711 2717 edittext.append(hgprefix(_("branch merge")))
2712 2718 if ctx.branch():
2713 2719 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2714 2720 if bookmarks.isactivewdirparent(repo):
2715 2721 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2716 2722 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2717 2723 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2718 2724 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2719 2725 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2720 2726 if not added and not modified and not removed:
2721 2727 edittext.append(hgprefix(_("no files changed")))
2722 2728 edittext.append("")
2723 2729
2724 2730 return "\n".join(edittext)
2725 2731
2726 2732 def commitstatus(repo, node, branch, bheads=None, opts=None):
2727 2733 if opts is None:
2728 2734 opts = {}
2729 2735 ctx = repo[node]
2730 2736 parents = ctx.parents()
2731 2737
2732 2738 if (not opts.get('amend') and bheads and node not in bheads and not
2733 2739 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2734 2740 repo.ui.status(_('created new head\n'))
2735 2741 # The message is not printed for initial roots. For the other
2736 2742 # changesets, it is printed in the following situations:
2737 2743 #
2738 2744 # Par column: for the 2 parents with ...
2739 2745 # N: null or no parent
2740 2746 # B: parent is on another named branch
2741 2747 # C: parent is a regular non head changeset
2742 2748 # H: parent was a branch head of the current branch
2743 2749 # Msg column: whether we print "created new head" message
2744 2750 # In the following, it is assumed that there already exists some
2745 2751 # initial branch heads of the current branch, otherwise nothing is
2746 2752 # printed anyway.
2747 2753 #
2748 2754 # Par Msg Comment
2749 2755 # N N y additional topo root
2750 2756 #
2751 2757 # B N y additional branch root
2752 2758 # C N y additional topo head
2753 2759 # H N n usual case
2754 2760 #
2755 2761 # B B y weird additional branch root
2756 2762 # C B y branch merge
2757 2763 # H B n merge with named branch
2758 2764 #
2759 2765 # C C y additional head from merge
2760 2766 # C H n merge with a head
2761 2767 #
2762 2768 # H H n head merge: head count decreases
2763 2769
2764 2770 if not opts.get('close_branch'):
2765 2771 for r in parents:
2766 2772 if r.closesbranch() and r.branch() == branch:
2767 2773 repo.ui.status(_('reopening closed branch head %d\n') % r.rev())
2768 2774
2769 2775 if repo.ui.debugflag:
2770 2776 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
2771 2777 elif repo.ui.verbose:
2772 2778 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
2773 2779
2774 2780 def postcommitstatus(repo, pats, opts):
2775 2781 return repo.status(match=scmutil.match(repo[None], pats, opts))
2776 2782
2777 2783 def revert(ui, repo, ctx, parents, *pats, **opts):
2778 2784 opts = pycompat.byteskwargs(opts)
2779 2785 parent, p2 = parents
2780 2786 node = ctx.node()
2781 2787
2782 2788 mf = ctx.manifest()
2783 2789 if node == p2:
2784 2790 parent = p2
2785 2791
2786 2792 # need all matching names in dirstate and manifest of target rev,
2787 2793 # so have to walk both. do not print errors if files exist in one
2788 2794 # but not other. in both cases, filesets should be evaluated against
2789 2795 # workingctx to get consistent result (issue4497). this means 'set:**'
2790 2796 # cannot be used to select missing files from target rev.
2791 2797
2792 2798 # `names` is a mapping for all elements in working copy and target revision
2793 2799 # The mapping is in the form:
2794 2800 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2795 2801 names = {}
2796 2802 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2797 2803
2798 2804 with repo.wlock():
2799 2805 ## filling of the `names` mapping
2800 2806 # walk dirstate to fill `names`
2801 2807
2802 2808 interactive = opts.get('interactive', False)
2803 2809 wctx = repo[None]
2804 2810 m = scmutil.match(wctx, pats, opts)
2805 2811
2806 2812 # we'll need this later
2807 2813 targetsubs = sorted(s for s in wctx.substate if m(s))
2808 2814
2809 2815 if not m.always():
2810 2816 matcher = matchmod.badmatch(m, lambda x, y: False)
2811 2817 for abs in wctx.walk(matcher):
2812 2818 names[abs] = m.exact(abs)
2813 2819
2814 2820 # walk target manifest to fill `names`
2815 2821
2816 2822 def badfn(path, msg):
2817 2823 if path in names:
2818 2824 return
2819 2825 if path in ctx.substate:
2820 2826 return
2821 2827 path_ = path + '/'
2822 2828 for f in names:
2823 2829 if f.startswith(path_):
2824 2830 return
2825 2831 ui.warn("%s: %s\n" % (uipathfn(path), msg))
2826 2832
2827 2833 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2828 2834 if abs not in names:
2829 2835 names[abs] = m.exact(abs)
2830 2836
2831 2837 # Find status of all file in `names`.
2832 2838 m = scmutil.matchfiles(repo, names)
2833 2839
2834 2840 changes = repo.status(node1=node, match=m,
2835 2841 unknown=True, ignored=True, clean=True)
2836 2842 else:
2837 2843 changes = repo.status(node1=node, match=m)
2838 2844 for kind in changes:
2839 2845 for abs in kind:
2840 2846 names[abs] = m.exact(abs)
2841 2847
2842 2848 m = scmutil.matchfiles(repo, names)
2843 2849
2844 2850 modified = set(changes.modified)
2845 2851 added = set(changes.added)
2846 2852 removed = set(changes.removed)
2847 2853 _deleted = set(changes.deleted)
2848 2854 unknown = set(changes.unknown)
2849 2855 unknown.update(changes.ignored)
2850 2856 clean = set(changes.clean)
2851 2857 modadded = set()
2852 2858
2853 2859 # We need to account for the state of the file in the dirstate,
2854 2860 # even when we revert against something else than parent. This will
2855 2861 # slightly alter the behavior of revert (doing back up or not, delete
2856 2862 # or just forget etc).
2857 2863 if parent == node:
2858 2864 dsmodified = modified
2859 2865 dsadded = added
2860 2866 dsremoved = removed
2861 2867 # store all local modifications, useful later for rename detection
2862 2868 localchanges = dsmodified | dsadded
2863 2869 modified, added, removed = set(), set(), set()
2864 2870 else:
2865 2871 changes = repo.status(node1=parent, match=m)
2866 2872 dsmodified = set(changes.modified)
2867 2873 dsadded = set(changes.added)
2868 2874 dsremoved = set(changes.removed)
2869 2875 # store all local modifications, useful later for rename detection
2870 2876 localchanges = dsmodified | dsadded
2871 2877
2872 2878 # only take into account for removes between wc and target
2873 2879 clean |= dsremoved - removed
2874 2880 dsremoved &= removed
2875 2881 # distinct between dirstate remove and other
2876 2882 removed -= dsremoved
2877 2883
2878 2884 modadded = added & dsmodified
2879 2885 added -= modadded
2880 2886
2881 2887 # tell newly modified apart.
2882 2888 dsmodified &= modified
2883 2889 dsmodified |= modified & dsadded # dirstate added may need backup
2884 2890 modified -= dsmodified
2885 2891
2886 2892 # We need to wait for some post-processing to update this set
2887 2893 # before making the distinction. The dirstate will be used for
2888 2894 # that purpose.
2889 2895 dsadded = added
2890 2896
2891 2897 # in case of merge, files that are actually added can be reported as
2892 2898 # modified, we need to post process the result
2893 2899 if p2 != nullid:
2894 2900 mergeadd = set(dsmodified)
2895 2901 for path in dsmodified:
2896 2902 if path in mf:
2897 2903 mergeadd.remove(path)
2898 2904 dsadded |= mergeadd
2899 2905 dsmodified -= mergeadd
2900 2906
2901 2907 # if f is a rename, update `names` to also revert the source
2902 2908 for f in localchanges:
2903 2909 src = repo.dirstate.copied(f)
2904 2910 # XXX should we check for rename down to target node?
2905 2911 if src and src not in names and repo.dirstate[src] == 'r':
2906 2912 dsremoved.add(src)
2907 2913 names[src] = True
2908 2914
2909 2915 # determine the exact nature of the deleted changesets
2910 2916 deladded = set(_deleted)
2911 2917 for path in _deleted:
2912 2918 if path in mf:
2913 2919 deladded.remove(path)
2914 2920 deleted = _deleted - deladded
2915 2921
2916 2922 # distinguish between file to forget and the other
2917 2923 added = set()
2918 2924 for abs in dsadded:
2919 2925 if repo.dirstate[abs] != 'a':
2920 2926 added.add(abs)
2921 2927 dsadded -= added
2922 2928
2923 2929 for abs in deladded:
2924 2930 if repo.dirstate[abs] == 'a':
2925 2931 dsadded.add(abs)
2926 2932 deladded -= dsadded
2927 2933
2928 2934 # For files marked as removed, we check if an unknown file is present at
2929 2935 # the same path. If a such file exists it may need to be backed up.
2930 2936 # Making the distinction at this stage helps have simpler backup
2931 2937 # logic.
2932 2938 removunk = set()
2933 2939 for abs in removed:
2934 2940 target = repo.wjoin(abs)
2935 2941 if os.path.lexists(target):
2936 2942 removunk.add(abs)
2937 2943 removed -= removunk
2938 2944
2939 2945 dsremovunk = set()
2940 2946 for abs in dsremoved:
2941 2947 target = repo.wjoin(abs)
2942 2948 if os.path.lexists(target):
2943 2949 dsremovunk.add(abs)
2944 2950 dsremoved -= dsremovunk
2945 2951
2946 2952 # action to be actually performed by revert
2947 2953 # (<list of file>, message>) tuple
2948 2954 actions = {'revert': ([], _('reverting %s\n')),
2949 2955 'add': ([], _('adding %s\n')),
2950 2956 'remove': ([], _('removing %s\n')),
2951 2957 'drop': ([], _('removing %s\n')),
2952 2958 'forget': ([], _('forgetting %s\n')),
2953 2959 'undelete': ([], _('undeleting %s\n')),
2954 2960 'noop': (None, _('no changes needed to %s\n')),
2955 2961 'unknown': (None, _('file not managed: %s\n')),
2956 2962 }
2957 2963
2958 2964 # "constant" that convey the backup strategy.
2959 2965 # All set to `discard` if `no-backup` is set do avoid checking
2960 2966 # no_backup lower in the code.
2961 2967 # These values are ordered for comparison purposes
2962 2968 backupinteractive = 3 # do backup if interactively modified
2963 2969 backup = 2 # unconditionally do backup
2964 2970 check = 1 # check if the existing file differs from target
2965 2971 discard = 0 # never do backup
2966 2972 if opts.get('no_backup'):
2967 2973 backupinteractive = backup = check = discard
2968 2974 if interactive:
2969 2975 dsmodifiedbackup = backupinteractive
2970 2976 else:
2971 2977 dsmodifiedbackup = backup
2972 2978 tobackup = set()
2973 2979
2974 2980 backupanddel = actions['remove']
2975 2981 if not opts.get('no_backup'):
2976 2982 backupanddel = actions['drop']
2977 2983
2978 2984 disptable = (
2979 2985 # dispatch table:
2980 2986 # file state
2981 2987 # action
2982 2988 # make backup
2983 2989
2984 2990 ## Sets that results that will change file on disk
2985 2991 # Modified compared to target, no local change
2986 2992 (modified, actions['revert'], discard),
2987 2993 # Modified compared to target, but local file is deleted
2988 2994 (deleted, actions['revert'], discard),
2989 2995 # Modified compared to target, local change
2990 2996 (dsmodified, actions['revert'], dsmodifiedbackup),
2991 2997 # Added since target
2992 2998 (added, actions['remove'], discard),
2993 2999 # Added in working directory
2994 3000 (dsadded, actions['forget'], discard),
2995 3001 # Added since target, have local modification
2996 3002 (modadded, backupanddel, backup),
2997 3003 # Added since target but file is missing in working directory
2998 3004 (deladded, actions['drop'], discard),
2999 3005 # Removed since target, before working copy parent
3000 3006 (removed, actions['add'], discard),
3001 3007 # Same as `removed` but an unknown file exists at the same path
3002 3008 (removunk, actions['add'], check),
3003 3009 # Removed since targe, marked as such in working copy parent
3004 3010 (dsremoved, actions['undelete'], discard),
3005 3011 # Same as `dsremoved` but an unknown file exists at the same path
3006 3012 (dsremovunk, actions['undelete'], check),
3007 3013 ## the following sets does not result in any file changes
3008 3014 # File with no modification
3009 3015 (clean, actions['noop'], discard),
3010 3016 # Existing file, not tracked anywhere
3011 3017 (unknown, actions['unknown'], discard),
3012 3018 )
3013 3019
3014 3020 for abs, exact in sorted(names.items()):
3015 3021 # target file to be touch on disk (relative to cwd)
3016 3022 target = repo.wjoin(abs)
3017 3023 # search the entry in the dispatch table.
3018 3024 # if the file is in any of these sets, it was touched in the working
3019 3025 # directory parent and we are sure it needs to be reverted.
3020 3026 for table, (xlist, msg), dobackup in disptable:
3021 3027 if abs not in table:
3022 3028 continue
3023 3029 if xlist is not None:
3024 3030 xlist.append(abs)
3025 3031 if dobackup:
3026 3032 # If in interactive mode, don't automatically create
3027 3033 # .orig files (issue4793)
3028 3034 if dobackup == backupinteractive:
3029 3035 tobackup.add(abs)
3030 3036 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3031 3037 absbakname = scmutil.backuppath(ui, repo, abs)
3032 3038 bakname = os.path.relpath(absbakname,
3033 3039 start=repo.root)
3034 3040 ui.note(_('saving current version of %s as %s\n') %
3035 3041 (uipathfn(abs), uipathfn(bakname)))
3036 3042 if not opts.get('dry_run'):
3037 3043 if interactive:
3038 3044 util.copyfile(target, absbakname)
3039 3045 else:
3040 3046 util.rename(target, absbakname)
3041 3047 if opts.get('dry_run'):
3042 3048 if ui.verbose or not exact:
3043 3049 ui.status(msg % uipathfn(abs))
3044 3050 elif exact:
3045 3051 ui.warn(msg % uipathfn(abs))
3046 3052 break
3047 3053
3048 3054 if not opts.get('dry_run'):
3049 3055 needdata = ('revert', 'add', 'undelete')
3050 3056 oplist = [actions[name][0] for name in needdata]
3051 3057 prefetch = scmutil.prefetchfiles
3052 3058 matchfiles = scmutil.matchfiles
3053 3059 prefetch(repo, [ctx.rev()],
3054 3060 matchfiles(repo,
3055 3061 [f for sublist in oplist for f in sublist]))
3056 3062 match = scmutil.match(repo[None], pats)
3057 3063 _performrevert(repo, parents, ctx, names, uipathfn, actions,
3058 3064 match, interactive, tobackup)
3059 3065
3060 3066 if targetsubs:
3061 3067 # Revert the subrepos on the revert list
3062 3068 for sub in targetsubs:
3063 3069 try:
3064 3070 wctx.sub(sub).revert(ctx.substate[sub], *pats,
3065 3071 **pycompat.strkwargs(opts))
3066 3072 except KeyError:
3067 3073 raise error.Abort("subrepository '%s' does not exist in %s!"
3068 3074 % (sub, short(ctx.node())))
3069 3075
3070 3076 def _performrevert(repo, parents, ctx, names, uipathfn, actions,
3071 3077 match, interactive=False, tobackup=None):
3072 3078 """function that actually perform all the actions computed for revert
3073 3079
3074 3080 This is an independent function to let extension to plug in and react to
3075 3081 the imminent revert.
3076 3082
3077 3083 Make sure you have the working directory locked when calling this function.
3078 3084 """
3079 3085 parent, p2 = parents
3080 3086 node = ctx.node()
3081 3087 excluded_files = []
3082 3088
3083 3089 def checkout(f):
3084 3090 fc = ctx[f]
3085 3091 repo.wwrite(f, fc.data(), fc.flags())
3086 3092
3087 3093 def doremove(f):
3088 3094 try:
3089 3095 rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
3090 3096 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3091 3097 except OSError:
3092 3098 pass
3093 3099 repo.dirstate.remove(f)
3094 3100
3095 3101 def prntstatusmsg(action, f):
3096 3102 exact = names[f]
3097 3103 if repo.ui.verbose or not exact:
3098 3104 repo.ui.status(actions[action][1] % uipathfn(f))
3099 3105
3100 3106 audit_path = pathutil.pathauditor(repo.root, cached=True)
3101 3107 for f in actions['forget'][0]:
3102 3108 if interactive:
3103 3109 choice = repo.ui.promptchoice(
3104 3110 _("forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3105 3111 if choice == 0:
3106 3112 prntstatusmsg('forget', f)
3107 3113 repo.dirstate.drop(f)
3108 3114 else:
3109 3115 excluded_files.append(f)
3110 3116 else:
3111 3117 prntstatusmsg('forget', f)
3112 3118 repo.dirstate.drop(f)
3113 3119 for f in actions['remove'][0]:
3114 3120 audit_path(f)
3115 3121 if interactive:
3116 3122 choice = repo.ui.promptchoice(
3117 3123 _("remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3118 3124 if choice == 0:
3119 3125 prntstatusmsg('remove', f)
3120 3126 doremove(f)
3121 3127 else:
3122 3128 excluded_files.append(f)
3123 3129 else:
3124 3130 prntstatusmsg('remove', f)
3125 3131 doremove(f)
3126 3132 for f in actions['drop'][0]:
3127 3133 audit_path(f)
3128 3134 prntstatusmsg('drop', f)
3129 3135 repo.dirstate.remove(f)
3130 3136
3131 3137 normal = None
3132 3138 if node == parent:
3133 3139 # We're reverting to our parent. If possible, we'd like status
3134 3140 # to report the file as clean. We have to use normallookup for
3135 3141 # merges to avoid losing information about merged/dirty files.
3136 3142 if p2 != nullid:
3137 3143 normal = repo.dirstate.normallookup
3138 3144 else:
3139 3145 normal = repo.dirstate.normal
3140 3146
3141 3147 newlyaddedandmodifiedfiles = set()
3142 3148 if interactive:
3143 3149 # Prompt the user for changes to revert
3144 3150 torevert = [f for f in actions['revert'][0] if f not in excluded_files]
3145 3151 m = scmutil.matchfiles(repo, torevert)
3146 3152 diffopts = patch.difffeatureopts(repo.ui, whitespace=True,
3147 3153 section='commands',
3148 3154 configprefix='revert.interactive.')
3149 3155 diffopts.nodates = True
3150 3156 diffopts.git = True
3151 3157 operation = 'apply'
3152 3158 if node == parent:
3153 3159 if repo.ui.configbool('experimental',
3154 3160 'revert.interactive.select-to-keep'):
3155 3161 operation = 'keep'
3156 3162 else:
3157 3163 operation = 'discard'
3158 3164
3159 3165 if operation == 'apply':
3160 3166 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3161 3167 else:
3162 3168 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3163 3169 originalchunks = patch.parsepatch(diff)
3164 3170
3165 3171 try:
3166 3172
3167 3173 chunks, opts = recordfilter(repo.ui, originalchunks, match,
3168 3174 operation=operation)
3169 3175 if operation == 'discard':
3170 3176 chunks = patch.reversehunks(chunks)
3171 3177
3172 3178 except error.PatchError as err:
3173 3179 raise error.Abort(_('error parsing patch: %s') % err)
3174 3180
3175 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3181 # FIXME: when doing an interactive revert of a copy, there's no way of
3182 # performing a partial revert of the added file, the only option is
3183 # "remove added file <name> (Yn)?", so we don't need to worry about the
3184 # alsorestore value. Ideally we'd be able to partially revert
3185 # copied/renamed files.
3186 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
3187 chunks, originalchunks)
3176 3188 if tobackup is None:
3177 3189 tobackup = set()
3178 3190 # Apply changes
3179 3191 fp = stringio()
3180 3192 # chunks are serialized per file, but files aren't sorted
3181 3193 for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
3182 3194 prntstatusmsg('revert', f)
3183 3195 files = set()
3184 3196 for c in chunks:
3185 3197 if ishunk(c):
3186 3198 abs = c.header.filename()
3187 3199 # Create a backup file only if this hunk should be backed up
3188 3200 if c.header.filename() in tobackup:
3189 3201 target = repo.wjoin(abs)
3190 3202 bakname = scmutil.backuppath(repo.ui, repo, abs)
3191 3203 util.copyfile(target, bakname)
3192 3204 tobackup.remove(abs)
3193 3205 if abs not in files:
3194 3206 files.add(abs)
3195 3207 if operation == 'keep':
3196 3208 checkout(abs)
3197 3209 c.write(fp)
3198 3210 dopatch = fp.tell()
3199 3211 fp.seek(0)
3200 3212 if dopatch:
3201 3213 try:
3202 3214 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3203 3215 except error.PatchError as err:
3204 3216 raise error.Abort(pycompat.bytestr(err))
3205 3217 del fp
3206 3218 else:
3207 3219 for f in actions['revert'][0]:
3208 3220 prntstatusmsg('revert', f)
3209 3221 checkout(f)
3210 3222 if normal:
3211 3223 normal(f)
3212 3224
3213 3225 for f in actions['add'][0]:
3214 3226 # Don't checkout modified files, they are already created by the diff
3215 3227 if f not in newlyaddedandmodifiedfiles:
3216 3228 prntstatusmsg('add', f)
3217 3229 checkout(f)
3218 3230 repo.dirstate.add(f)
3219 3231
3220 3232 normal = repo.dirstate.normallookup
3221 3233 if node == parent and p2 == nullid:
3222 3234 normal = repo.dirstate.normal
3223 3235 for f in actions['undelete'][0]:
3224 3236 if interactive:
3225 3237 choice = repo.ui.promptchoice(
3226 3238 _("add back removed file %s (Yn)?$$ &Yes $$ &No") % f)
3227 3239 if choice == 0:
3228 3240 prntstatusmsg('undelete', f)
3229 3241 checkout(f)
3230 3242 normal(f)
3231 3243 else:
3232 3244 excluded_files.append(f)
3233 3245 else:
3234 3246 prntstatusmsg('undelete', f)
3235 3247 checkout(f)
3236 3248 normal(f)
3237 3249
3238 3250 copied = copies.pathcopies(repo[parent], ctx)
3239 3251
3240 3252 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3241 3253 if f in copied:
3242 3254 repo.dirstate.copy(copied[f], f)
3243 3255
3244 3256 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3245 3257 # commands.outgoing. "missing" is "missing" of the result of
3246 3258 # "findcommonoutgoing()"
3247 3259 outgoinghooks = util.hooks()
3248 3260
3249 3261 # a list of (ui, repo) functions called by commands.summary
3250 3262 summaryhooks = util.hooks()
3251 3263
3252 3264 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3253 3265 #
3254 3266 # functions should return tuple of booleans below, if 'changes' is None:
3255 3267 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3256 3268 #
3257 3269 # otherwise, 'changes' is a tuple of tuples below:
3258 3270 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3259 3271 # - (desturl, destbranch, destpeer, outgoing)
3260 3272 summaryremotehooks = util.hooks()
3261 3273
3262 3274
3263 3275 def checkunfinished(repo, commit=False, skipmerge=False):
3264 3276 '''Look for an unfinished multistep operation, like graft, and abort
3265 3277 if found. It's probably good to check this right before
3266 3278 bailifchanged().
3267 3279 '''
3268 3280 # Check for non-clearable states first, so things like rebase will take
3269 3281 # precedence over update.
3270 3282 for state in statemod._unfinishedstates:
3271 3283 if (state._clearable or (commit and state._allowcommit) or
3272 3284 state._reportonly):
3273 3285 continue
3274 3286 if state.isunfinished(repo):
3275 3287 raise error.Abort(state.msg(), hint=state.hint())
3276 3288
3277 3289 for s in statemod._unfinishedstates:
3278 3290 if (not s._clearable or (commit and s._allowcommit) or
3279 3291 (s._opname == 'merge' and skipmerge) or s._reportonly):
3280 3292 continue
3281 3293 if s.isunfinished(repo):
3282 3294 raise error.Abort(s.msg(), hint=s.hint())
3283 3295
3284 3296 def clearunfinished(repo):
3285 3297 '''Check for unfinished operations (as above), and clear the ones
3286 3298 that are clearable.
3287 3299 '''
3288 3300 for state in statemod._unfinishedstates:
3289 3301 if state._reportonly:
3290 3302 continue
3291 3303 if not state._clearable and state.isunfinished(repo):
3292 3304 raise error.Abort(state.msg(), hint=state.hint())
3293 3305
3294 3306 for s in statemod._unfinishedstates:
3295 3307 if s._opname == 'merge' or state._reportonly:
3296 3308 continue
3297 3309 if s._clearable and s.isunfinished(repo):
3298 3310 util.unlink(repo.vfs.join(s._fname))
3299 3311
3300 3312 def getunfinishedstate(repo):
3301 3313 ''' Checks for unfinished operations and returns statecheck object
3302 3314 for it'''
3303 3315 for state in statemod._unfinishedstates:
3304 3316 if state.isunfinished(repo):
3305 3317 return state
3306 3318 return None
3307 3319
3308 3320 def howtocontinue(repo):
3309 3321 '''Check for an unfinished operation and return the command to finish
3310 3322 it.
3311 3323
3312 3324 statemod._unfinishedstates list is checked for an unfinished operation
3313 3325 and the corresponding message to finish it is generated if a method to
3314 3326 continue is supported by the operation.
3315 3327
3316 3328 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3317 3329 a boolean.
3318 3330 '''
3319 3331 contmsg = _("continue: %s")
3320 3332 for state in statemod._unfinishedstates:
3321 3333 if not state._continueflag:
3322 3334 continue
3323 3335 if state.isunfinished(repo):
3324 3336 return contmsg % state.continuemsg(), True
3325 3337 if repo[None].dirty(missing=True, merge=False, branch=False):
3326 3338 return contmsg % _("hg commit"), False
3327 3339 return None, None
3328 3340
3329 3341 def checkafterresolved(repo):
3330 3342 '''Inform the user about the next action after completing hg resolve
3331 3343
3332 3344 If there's a an unfinished operation that supports continue flag,
3333 3345 howtocontinue will yield repo.ui.warn as the reporter.
3334 3346
3335 3347 Otherwise, it will yield repo.ui.note.
3336 3348 '''
3337 3349 msg, warning = howtocontinue(repo)
3338 3350 if msg is not None:
3339 3351 if warning:
3340 3352 repo.ui.warn("%s\n" % msg)
3341 3353 else:
3342 3354 repo.ui.note("%s\n" % msg)
3343 3355
3344 3356 def wrongtooltocontinue(repo, task):
3345 3357 '''Raise an abort suggesting how to properly continue if there is an
3346 3358 active task.
3347 3359
3348 3360 Uses howtocontinue() to find the active task.
3349 3361
3350 3362 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3351 3363 a hint.
3352 3364 '''
3353 3365 after = howtocontinue(repo)
3354 3366 hint = None
3355 3367 if after[1]:
3356 3368 hint = after[0]
3357 3369 raise error.Abort(_('no %s in progress') % task, hint=hint)
3358 3370
3359 3371 def abortgraft(ui, repo, graftstate):
3360 3372 """abort the interrupted graft and rollbacks to the state before interrupted
3361 3373 graft"""
3362 3374 if not graftstate.exists():
3363 3375 raise error.Abort(_("no interrupted graft to abort"))
3364 3376 statedata = readgraftstate(repo, graftstate)
3365 3377 newnodes = statedata.get('newnodes')
3366 3378 if newnodes is None:
3367 3379 # and old graft state which does not have all the data required to abort
3368 3380 # the graft
3369 3381 raise error.Abort(_("cannot abort using an old graftstate"))
3370 3382
3371 3383 # changeset from which graft operation was started
3372 3384 if len(newnodes) > 0:
3373 3385 startctx = repo[newnodes[0]].p1()
3374 3386 else:
3375 3387 startctx = repo['.']
3376 3388 # whether to strip or not
3377 3389 cleanup = False
3378 3390 from . import hg
3379 3391 if newnodes:
3380 3392 newnodes = [repo[r].rev() for r in newnodes]
3381 3393 cleanup = True
3382 3394 # checking that none of the newnodes turned public or is public
3383 3395 immutable = [c for c in newnodes if not repo[c].mutable()]
3384 3396 if immutable:
3385 3397 repo.ui.warn(_("cannot clean up public changesets %s\n")
3386 3398 % ', '.join(bytes(repo[r]) for r in immutable),
3387 3399 hint=_("see 'hg help phases' for details"))
3388 3400 cleanup = False
3389 3401
3390 3402 # checking that no new nodes are created on top of grafted revs
3391 3403 desc = set(repo.changelog.descendants(newnodes))
3392 3404 if desc - set(newnodes):
3393 3405 repo.ui.warn(_("new changesets detected on destination "
3394 3406 "branch, can't strip\n"))
3395 3407 cleanup = False
3396 3408
3397 3409 if cleanup:
3398 3410 with repo.wlock(), repo.lock():
3399 3411 hg.updaterepo(repo, startctx.node(), overwrite=True)
3400 3412 # stripping the new nodes created
3401 3413 strippoints = [c.node() for c in repo.set("roots(%ld)",
3402 3414 newnodes)]
3403 3415 repair.strip(repo.ui, repo, strippoints, backup=False)
3404 3416
3405 3417 if not cleanup:
3406 3418 # we don't update to the startnode if we can't strip
3407 3419 startctx = repo['.']
3408 3420 hg.updaterepo(repo, startctx.node(), overwrite=True)
3409 3421
3410 3422 ui.status(_("graft aborted\n"))
3411 3423 ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
3412 3424 graftstate.delete()
3413 3425 return 0
3414 3426
3415 3427 def readgraftstate(repo, graftstate):
3416 3428 """read the graft state file and return a dict of the data stored in it"""
3417 3429 try:
3418 3430 return graftstate.read()
3419 3431 except error.CorruptedState:
3420 3432 nodes = repo.vfs.read('graftstate').splitlines()
3421 3433 return {'nodes': nodes}
3422 3434
3423 3435 def hgabortgraft(ui, repo):
3424 3436 """ abort logic for aborting graft using 'hg abort'"""
3425 3437 with repo.wlock():
3426 3438 graftstate = statemod.cmdstate(repo, 'graftstate')
3427 3439 return abortgraft(ui, repo, graftstate)
@@ -1,2869 +1,2869 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import contextlib
13 13 import copy
14 14 import email
15 15 import errno
16 16 import hashlib
17 17 import os
18 18 import re
19 19 import shutil
20 20 import zlib
21 21
22 22 from .i18n import _
23 23 from .node import (
24 24 hex,
25 25 short,
26 26 )
27 27 from . import (
28 28 copies,
29 29 diffhelper,
30 30 diffutil,
31 31 encoding,
32 32 error,
33 33 mail,
34 34 mdiff,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 similar,
39 39 util,
40 40 vfs as vfsmod,
41 41 )
42 42 from .utils import (
43 43 dateutil,
44 44 procutil,
45 45 stringutil,
46 46 )
47 47
48 48 stringio = util.stringio
49 49
50 50 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
51 51 tabsplitter = re.compile(br'(\t+|[^\t]+)')
52 52 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
53 53 b'[^ \ta-zA-Z0-9_\x80-\xff])')
54 54
55 55 PatchError = error.PatchError
56 56
57 57 # public functions
58 58
def split(stream):
    '''return an iterator of individual patches from a stream

    The stream may hold a bare patch, a series of '# HG changeset patch'
    exports, an mbox of patch mails, or a (possibly multipart) email.
    The format is sniffed from the first lines; the partially-consumed
    stream plus the already-read prefix are then handed to a matching
    sub-splitter, which continues reading from the same iterator.
    '''
    def isheader(line, inheader):
        # RFC822-ish header detection; continuation lines only count
        # while we are already inside a header block.
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # Wrap accumulated lines in a file-like object.
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # Split on '# HG changeset patch' markers that follow a blank
        # line (i.e. that are not part of the current patch's header).
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # mbox: each 'From ' line starts a new message; recurse into
        # split() on each message body (dropping the 'From ' line).
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            # Flatten a message part back into a file-like object.
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            # Only yield the parts that can plausibly contain a patch.
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Split whenever a new header block begins after non-header text.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # No recognizable structure: the whole input is one patch.
        yield chunk(cur)

    class fiter(object):
        # Adapter giving iterator protocol to objects that only have
        # readline() (e.g. http responses).
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
187 187
188 188 ## Some facility for extensible patch parsing:
189 189 # list of pairs ("header to match", "data key")
190 190 patchheadermap = [('Date', 'date'),
191 191 ('Branch', 'branch'),
192 192 ('Node ID', 'nodeid'),
193 193 ]
194 194
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''
    # Stage the raw diff into a temporary file; it is removed again once
    # the caller leaves the context.
    fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        yield _extract(ui, fileobj, tmpname, tmpfp)
    finally:
        tmpfp.close()
        os.unlink(tmpname)
220 220
def _extract(ui, fileobj, tmpname, tmpfp):
    # Parse an email (or bare patch) from fileobj, write the diff parts
    # to tmpfp/tmpname and return the metadata dict described in
    # extract()'s docstring.

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data['user']:
        # Not an email, restore parsed headers if any
        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                            for h in msg.items()) + '\n'

    # should try to parse msg['Date']
    parents = []

    if subject:
        # Strip a leading '[PATCH ...]' tag and unfold wrapped subjects.
        if subject.startswith('[PATCH'):
            pend = subject.find(']')
            if pend >= 0:
                subject = subject[pend + 1:].lstrip()
        subject = re.sub(br'\n[ \t]+', ' ', subject)
        ui.debug('Subject: %s\n' % subject)
    if data['user']:
        ui.debug('From: %s\n' % data['user'])
    diffs_seen = 0
    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
    message = ''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug('Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            # Everything before the diff start is (candidate) commit
            # message; the diff itself goes into the temp file.
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug('found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[:m.start(0)].splitlines():
                if line.startswith('# HG changeset patch') and not hgpatch:
                    ui.debug('patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    # '# '-prefixed metadata lines from 'hg export'.
                    if line.startswith('# User '):
                        data['user'] = line[7:]
                        ui.debug('From: %s\n' % data['user'])
                    elif line.startswith("# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith("# "):
                        for header, key in patchheadermap:
                            prefix = '# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix):]
                    else:
                        hgpatchheader = False
                elif line == '---':
                    # '---' separates the message from a diffstat/footer.
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write('\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith('\n'):
                    tmpfp.write('\n')
        elif not diffs_seen and message and content_type == 'text/plain':
            # Plain-text parts before the first diff extend the message.
            message += '\n' + payload

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname

    return data
320 320
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Decode a git-style octal mode into the (islink, isexec) pair.
        linkbit = mode & 0o20000
        execbit = mode & 0o100
        self.mode = (linkbit, execbit)

    def copy(self):
        # Shallow duplicate carrying over all metadata fields.
        clone = patchmeta(self.path)
        clone.oldpath = self.oldpath
        clone.mode = self.mode
        clone.op = self.op
        clone.binary = self.binary
        return clone

    def _ispatchinga(self, afile):
        # Only additions patch the null source.
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # Only deletions patch into the null destination.
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # True if this metadata describes the a/ -> b/ pair of a hunk.
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return r"<patchmeta %s %r>" % (self.op, self.path)
366 366
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Walk the stream; each 'diff --git' line opens a new patchmeta and
    # the extended header lines that follow refine it, until the '--- '
    # line marks the start of hunk data.
    result = []
    current = None
    for rawline in lr:
        rawline = rawline.rstrip(' \r\n')
        if rawline.startswith('diff --git a/'):
            m = gitre.match(rawline)
            if m:
                if current:
                    result.append(current)
                current = patchmeta(m.group(2))
        elif current:
            if rawline.startswith('--- '):
                # Metadata for this file is complete.
                result.append(current)
                current = None
                continue
            if rawline.startswith('rename from '):
                current.op = 'RENAME'
                current.oldpath = rawline[12:]
            elif rawline.startswith('rename to '):
                current.path = rawline[10:]
            elif rawline.startswith('copy from '):
                current.op = 'COPY'
                current.oldpath = rawline[10:]
            elif rawline.startswith('copy to '):
                current.path = rawline[8:]
            elif rawline.startswith('deleted file'):
                current.op = 'DELETE'
            elif rawline.startswith('new file mode '):
                current.op = 'ADD'
                current.setmode(int(rawline[-6:], 8))
            elif rawline.startswith('new mode '):
                current.setmode(int(rawline[-6:], 8))
            elif rawline.startswith('GIT binary patch'):
                current.binary = True
    if current:
        result.append(current)

    return result
410 410
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # Queue a line to be returned ahead of further reads from fp.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # Pushed-back lines win over the underlying stream, oldest first.
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # Iterate until the underlying readline() reports EOF ('').
        return iter(self.readline, '')
430 430
class abstractbackend(object):
    """Interface for the destinations a patch can be applied to.

    Concrete subclasses apply patched content to the filesystem, the
    working copy, or an in-memory store.
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.

        Default implementation discards the rejects; subclasses may
        persist them (e.g. as a .rej file).
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
464 464
class fsbackend(abstractbackend):
    """Patch backend reading and writing files relative to a base
    directory on the filesystem."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # For a symlink the "data" is its target path.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # A missing file is simply non-executable; other errors
            # (permissions, I/O) are real failures.
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
        # Missing/deleted file.
        return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Mode-only change: leave the content alone.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        # Persist rejected hunks next to the target as '<fname>.rej'.
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
514 514
class workingbackend(fsbackend):
    """fsbackend that also keeps the repository dirstate in sync with
    the files it touches (copies, removals, new files)."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to touch files that exist on disk but are untracked.
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # Flush the accumulated bookkeeping into the dirstate: record
        # copies first, then forget removals, then mark the rest touched.
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
558 558
class filestore(object):
    """Accumulate patched file contents keyed by name.

    Contents are kept in memory until roughly maxsize bytes have been
    stored; later files spill into a temporary directory on disk, which
    is removed again by close(). A negative maxsize disables spilling.
    """
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        # Default in-memory budget: 4 MiB.
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # Still within budget: keep it in memory.
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = pycompat.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        storename = '%d' % self.created
        self.opener.write(storename, data)
        self.created += 1
        self.files[fname] = (storename, mode, copied)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        storename, mode, copied = self.files[fname]
        return self.opener.read(storename), mode, copied

    def close(self):
        # Drop the on-disk spill directory, if any was created.
        if self.opener:
            shutil.rmtree(self.opener.base)
595 595
class repobackend(abstractbackend):
    """Patch backend applying changes against a changectx, staging the
    results in a filestore instead of touching the working copy."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # Only files present in the base context can be patched.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Mode-only change: carry over the existing content.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # Report everything that was touched.
        return self.changed | self.removed
637 637
638 638 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
639 639 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
640 640 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
641 641 eolmodes = ['strict', 'crlf', 'lf', 'auto']
642 642
class patchfile(object):
    """State for applying the hunks of one patched file.

    Loads the target (or copy source) content through a backend/store,
    applies hunks with offset tracking and fuzz, and writes the result
    plus any rejects back through the backend on close().
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # Copies/renames read their base content from the store, not
        # from the target file.
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        # Re-apply the requested EOL convention before writing back.
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l.endswith('\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # Print 'patching file ...' at most once per file.
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk; return 0/fuzz-level on success, -1 on reject."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace the whole content at once.
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # Flush modified content and rejects; return the reject count.
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
858 858
class header(object):
    """patch header

    Wraps the header lines of one file's diff and answers questions
    about them (file names, binary-ness, special handling, new file).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename|new mode) ')
    # 'rename to' included so partial commits of renames are detected
    # as creating the destination file (issue5723).
    newfile_re = re.compile('(?:new file|copy to|rename to)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # A git 'index' line marks binary (all-or-nothing) content.
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        # Prefer the git-style header; fall back to 'diff -r' form.
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(h) for h in self.header)
930 930
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Cap context at maxcontext lines, keeping the end closest
            # to the hunk body; report how many lines were dropped.
            if maxcontext is None:
                return 0, lines
            delta = len(lines) - maxcontext
            if delta <= 0:
                return 0, lines
            if reverse:
                return delta, lines[delta:]
            return delta, lines[:maxcontext]

        self.header = header
        skipped, self.before = trimcontext(before, True)
        # Trimming leading context shifts the hunk's start lines down.
        self.fromline = fromline + skipped
        self.toline = toline + skipped
        _afterskipped, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, other):
        if not isinstance(other, recordhunk):
            return False
        return (other.hunk == self.hunk
                and other.proc == self.proc
                and self.fromline == other.fromline
                and self.header.files() == other.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for h in hunk if h.startswith('+'))
        rem = sum(1 for h in hunk if h.startswith('-'))
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = ['%s%s' % (flip[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        # Context lines count on both sides; a trailing "no newline"
        # marker is metadata, not a line.
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1009 1009
def getmessages():
    """Return the catalog of interactive-prompt strings.

    Keys 'multiple' and 'single' (whether one or several changes are
    being reviewed) and 'help' each map an operation name ('apply',
    'discard', 'keep', 'record') to its translatable prompt string,
    consumed by filterpatch().
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'keep': _("keep change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'keep': _("keep this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'keep': _('[Ynesfdaq?]'
                      '$$ &Yes, keep this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Keep remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Keep &all changes to all remaining files'
                      '$$ &Quit, keeping all changes'
                      '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1067 1067
def filterpatch(ui, headers, match, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    headers is a list of header objects (each carrying its hunks) as
    produced by parsepatch().  match selects files that are taken
    without prompting.  operation names the verb used in the prompts
    ('record' by default).

    Returns a two-tuple: the flat list of selected headers and hunks,
    and an (empty) dict of replacement data.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # sticky answers short-circuit the prompt entirely
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            # IMPORTANT: keep the last line of this prompt short (<40 english
            # chars is a good target) because of issue6158.
            r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff")
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write(''.join(['# ' + i + '\n'
                                     for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        files = h.files()
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in files))
        # files known to be explicitly wanted are taken without asking
        if all(match.exact(f) for f in files):
            r, skipall, np = True, None, None
        else:
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                skipall, msg, chunk)
            if r:
                if fixoffset:
                    # the stdlib copy module is imported as 'copymod' at
                    # the top of this file; 'copy.copy' was a NameError
                    chunk = copymod.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # skipped hunks shift the target lines of later hunks
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single hunk parsed from a unified or context diff.

    'a' collects the old-side lines ('-'/' ' prefixed), 'b' the new-side
    lines, and 'hunk' the raw hunk text starting with the description
    line.  starta/lena and startb/lenb mirror the hunk's declared
    ranges.
    """
    def __init__(self, desc, num, lr, context):
        # num is the 1-based hunk number, used only in error messages.
        # context selects the parser: True for context diffs, False for
        # unified diffs.  When lr is None nothing is parsed; see
        # getnormalized(), which rebuilds such a shell manually.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body ('@@ -a,l +b,l @@') from lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the range means a single line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
                                self.a, self.b)
        except error.ParseError as e:
            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk body ('*** / ---' style) from lr and
        synthesize the equivalent unified description line."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side block
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # no-eol marker: strip the trailing newline we just stored
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new-side block, merging it into self.hunk in order
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): this loop reads the *new* side, yet the
                # message says "old text line" — looks like a copy/paste
                # wart; confirm before changing the translatable string.
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # align this new-side line with self.hunk, skipping removals
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True once both sides have as many lines as the ranges declared
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Trim up to 'fuzz' context lines from the hunk and return
        (old, oldstart, new, newstart) with 0-based start offsets."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1445 1445
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload (bytes) once _read() succeeds
        self.text = None
        # True when the payload is a delta against existing content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # parsing either raises or leaves self.text set
        return self.text is not None

    def new(self, lines):
        if self.delta:
            # 'lines' carry the base content the delta applies to
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one line, keeping the raw text in 'hunk'
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        # scan for the 'literal <size>' or 'delta <size>' header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length of the line:
            # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52 (git base85 framing)
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1500 1500
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' patch header line."""
    # drop the 4-character prefix and the trailing newline
    s = str[4:].rstrip('\r\n')
    # the name ends at the first tab, or the first space when no tab exists
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
1510 1510
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # headers pass through unchanged; anything that knows how to reverse
    # itself (recordhunk) is replaced by its reversal
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1573 1573
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current hunk's declared start lines
            self.fromline = 0
            self.toline = 0
            # function/section name from the @@ line, if any
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # a new @@ range closes any pending hunk first
            self.addcontext([])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context after a hunk terminates it: build the recordhunk
            # and advance the line counters past it
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen just before a hunk becomes its 'before' lines
            if self.context:
                self.before = self.context
                self.context = []
            if self.hunk:
                self.addcontext([])
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the trailing hunk, if any
            self.addcontext([])
            return self.headers

        # legal (state, event) -> action table; anything absent is an error
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    # feed scanner events through the transition table
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1700 1700
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    cut = 0
    remaining = strip
    while remaining:
        cut = path.find('/', cut)
        if cut == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        cut += 1
        # a run of consecutive slashes belongs to the stripped part
        while cut < end - 1 and path[cut:cut + 1] == '/':
            cut += 1
        remaining -= 1
    return path[:cut].lstrip(), prefix + path[cut:].rstrip()
1738 1738
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) hunk: decide which file it
    patches and whether it is an ADD or DELETE.

    afile_orig/bfile_orig are the raw '---'/'+++' names; strip and prefix
    are applied via pathtransform(); backend.exists() checks the working
    copy.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # creation/removal is signalled by /dev/null plus an empty 0,0 range
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer whichever side actually exists in the working copy
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the non-null name from the patch itself
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1793 1793
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def gather(first, keep):
        """collect lines while 'keep' holds, pushing back the first miss"""
        collected = [first]
        for nextline in iter(lr.readline, ''):
            if not keep(nextline):
                lr.push(nextline)
                break
            collected.append(nextline)
        return collected

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(l):
                parts = l.split(None, 1)
                return not parts or parts[0] not in ('---', 'diff')
            header = gather(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line.startswith(' '):
            yield 'context', gather(line, lambda l: l.startswith((' ', '\\')))
        elif line.startswith(('-', '+')):
            yield 'hunk', gather(line,
                                 lambda l: l.startswith(('-', '+', '\\')))
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1841 1841
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    try:
        # seekable input: remember where we are so we can rewind after
        # the metadata scan
        fp = lr.fp
        pos = fp.tell()
    except IOError:
        # unseekable input: slurp it into a rewindable buffer instead
        fp = stringio(lr.fp.read())
        pos = 0
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches
1867 1867
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context stays None until the diff flavor is known: True for
    # context diffs, False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # hunk start for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file itself first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries for files we skipped over
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any trailing metadata-only git entries (e.g. mode changes)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1962 1962
def applybindelta(binchunk, data):
    """Apply a binary delta hunk to ``data`` and return the new content.

    The algorithm used is the algorithm from git's patch-delta.c: the
    delta begins with two variable-length size headers (source and
    result sizes), followed by a stream of opcodes that either copy a
    range out of ``data`` (high bit set) or insert literal bytes carried
    in the delta itself.
    """
    def deltahead(binchunk):
        # width of the variable-length integer at the head of binchunk:
        # each byte with the top bit set means another byte follows
        i = 0
        for c in pycompat.bytestr(binchunk):
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # skip the source and result size headers; only their width matters here
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    pieces = []  # output fragments, joined once (avoids quadratic +=)
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy opcode: bits 0-3 select offset bytes, bits 4-6 size bytes
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # a zero size encodes the maximum copy length (git quirk)
                size = 0x10000
            offset_end = offset + size
            pieces.append(data[offset:offset_end])
        elif cmd != 0:
            # insert opcode: the next 'cmd' bytes are literal data
            offset_end = i + cmd
            pieces.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
2018 2018
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from ``fp`` and attempt to apply it via ``backend``.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With ``eolmode`` set to 'strict', patch content and patched files are
    handled in binary mode; any other mode ignores line endings while
    patching and then normalizes them according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
2031 2031
2032 2032 def _canonprefix(repo, prefix):
2033 2033 if prefix:
2034 2034 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2035 2035 if prefix != '':
2036 2036 prefix += '/'
2037 2037 return prefix
2038 2038
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Parse the patch stream ``fp`` and apply it through ``backend``.

    ``patcher`` is the patchfile-style factory used to apply hunks to a
    single file; ``store`` caches file contents so renames/copies can
    read their source data.  Returns 0 on a clean apply, 1 if any hunk
    applied with fuzz, and -1 if any hunk was rejected.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip leading path components and re-attach the canonical prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            # hunks arriving while no file is open (e.g. its setup
            # failed) are silently dropped
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finalize the previous file before starting the next one
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: delete, rename/copy, or mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the failure and keep patching the remaining files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash away the pristine contents of copy/rename sources so
            # later 'file' events can read them from the store
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2123 2123
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    cmd = ('%s %s -p%d < %s'
           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
    ui.debug('Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, 'rb')
    # Initialize these before the loop: if the external tool emits a
    # 'with fuzz' or 'FAILED' line before any 'patching file' line, the
    # previous code hit an UnboundLocalError here.
    pf = ''
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         procutil.explainexit(code))
    return fuzz
2167 2167
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply ``patchobj`` (a path or an open file-like object) via ``backend``.

    ``files`` is updated in place with the set of touched files.  Raises
    PatchError when the patch fails to apply; otherwise returns True if
    the patch applied with fuzz and False if it applied cleanly.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # patchobj is already an open file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file we opened ourselves; always flush the
        # backend (collecting touched files) and release the store
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2194 2194
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply ``patchobj`` to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2201 2201
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply ``patchobj`` on top of ``ctx``, recording results in ``store``."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2206 2206
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    externaltool = ui.config('ui', 'patch')
    if externaltool:
        return _externalpatch(ui, repo, externaltool, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2228 2228
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of file paths touched by the patch at ``patchpath``.

    ``strip`` and ``prefix`` are interpreted as in patch().  A rename
    contributes both its source and its destination path.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                continue
            if state != 'file':
                raise error.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                               prefix)[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                   strip, prefix)
            changed.add(gp.path)
            if gp.op == 'RENAME':
                changed.add(gp.oldpath)
    return changed
2251 2251
class GitDiffRequired(Exception):
    """Raised internally when a change cannot be expressed as a plain
    diff and output must be regenerated in the git extended format."""
    pass
2254 2254
# Convenience aliases for the diff options factories defined in diffutil.
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2258 2258
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, pathfn=None, copy=None,
         copysourcematch=None, hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    if copysourcematch is not None, then copy sources will be filtered by this
    matcher

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    if not node1 and not node2:
        # default to diffing against the first working-directory parent
        node1 = repo.dirstate.p1()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, pathfn=pathfn, copy=copy,
            copysourcematch=copysourcematch):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, (
                'fctx2 unexpectedly None in diff hunks filtering')
            hunks = hunksfilterfn(fctx2, hunks)
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2313 2313
def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None,
              losedatafn=None, pathfn=None, copy=None, copysourcematch=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    def lrugetfilectx():
        # small LRU cache of filelogs so repeated filectx lookups for the
        # same file do not reopen the revlog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    if not changes:
        changes = ctx1.status(ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if copysourcematch:
        # filter out copies where source side isn't inside the matcher
        # (copies.pathcopies() already filtered out the destination)
        # (.items() rather than py2-only .iteritems(), consistent with the
        # list(copy.items()) call below)
        copy = {dst: src for dst, src in copy.items()
                if copysourcematch(src)}

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset))
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, pathfn)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2407 2407
def diffsinglehunk(hunklines):
    """yield (text, label) tokens for a list of lines in a single hunk"""
    for line in hunklines:
        # classify the line by its diff marker
        if line[0:1] == '-':
            linelabel = 'diff.deleted'
        elif line[0:1] == '+':
            linelabel = 'diff.inserted'
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
        withouteol = line.rstrip('\r\n')
        body = withouteol.rstrip()
        # emit the body, highlighting tabs specially
        for piece in tabsplitter.findall(body):
            piecelabel = 'diff.tab' if piece[0:1] == '\t' else linelabel
            yield (piece, piecelabel)
        # trailing whitespace (before the EOL) gets its own label
        trailing = withouteol[len(body):]
        if trailing:
            yield (trailing, 'diff.trailingwhitespace')
        # finally the line ending itself, unlabeled
        eol = line[len(withouteol):]
        if eol:
            yield (eol, '')
2431 2431
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors

    Tokens within changed regions of a line are labeled
    '<label>.changed' so the UI can highlight intra-line differences.
    """
    # prepare deleted, and inserted content
    a = ''
    b = ''
    for line in hunklines:
        if line[0:1] == '-':
            a += line[1:]
        elif line[0:1] == '+':
            b += line[1:]
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    aln = [s if s == '\n' else s + '\n' for s in al]
    bln = [s if s == '\n' else s + '\n' for s in bl]
    an = ''.join(aln)
    bn = ''.join(bln)
    # run the diff algorithm, prepare atokens and btokens
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        changed = btype == '!'
        for token in mdiff.splitnewlines(''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [('-', 'diff.deleted', atokens),
                                  ('+', 'diff.inserted', btokens)]:
        # each output line starts with its '-'/'+' prefix token
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                yield (prefix, label)
                nextisnewline = False
            # special handling line end
            isendofline = token.endswith('\n')
            if isendofline:
                chomp = token[:-1] # chomp
                if chomp.endswith('\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp):]
                token = chomp.rstrip() # detect spaces at the end
                endspaces = chomp[len(token):]
            # scan tabs
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = 'diff.tab'
                else:
                    if changed:
                        currentlabel = label + '.changed'
                    else:
                        currentlabel = label + '.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                # trailing spaces, then the EOL itself, close the line
                if endspaces:
                    yield (endspaces, 'diff.trailingwhitespace')
                yield (endofline, '')
                nextisnewline = True
2500 2500
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()

    func is expected to produce diff text (like diff()); its output is
    split into lines and each line is labeled for colorized display.
    Adjacent '-'/'+' lines are buffered and handed to the hunk
    tokenizer together so intra-hunk highlighting can work.
    '''
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    # - and + are handled by diffsinglehunk
                   ]
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []
    def consumehunkbuffer():
        # flush the buffered hunk lines through the hunk tokenizer
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            # track whether we are inside a file header ('diff ...' up to
            # the first '@@' hunk line)
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and not line.startswith((' ', '+', '-', '@', '\\')):
                    head = True
            diffline = False
            if not head and line and line.startswith(('+', '-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += "\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (line[len(stripline):],
                                   'diff.trailingwhitespace')
                        break
                else:
                    yield (line, '')
                if i + 1 < linecount:
                    yield ('\n', '')
    for token in consumehunkbuffer():
        yield token
2571 2571
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    for token in difflabel(diff, *args, **kw):
        yield token
2575 2575
2576 2576 def _filepairs(modified, added, removed, copy, opts):
2577 2577 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2578 2578 before and f2 is the the name after. For added files, f1 will be None,
2579 2579 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2580 2580 or 'rename' (the latter two only if opts.git is set).'''
2581 2581 gone = set()
2582 2582
2583 2583 copyto = dict([(v, k) for k, v in copy.items()])
2584 2584
2585 2585 addedset, removedset = set(added), set(removed)
2586 2586
2587 2587 for f in sorted(modified + added + removed):
2588 2588 copyop = None
2589 2589 f1, f2 = f, f
2590 2590 if f in addedset:
2591 2591 f1 = None
2592 2592 if f in copy:
2593 2593 if opts.git:
2594 2594 f1 = copy[f]
2595 2595 if f1 in removedset and f1 not in gone:
2596 2596 copyop = 'rename'
2597 2597 gone.add(f1)
2598 2598 else:
2599 2599 copyop = 'copy'
2600 2600 elif f in removedset:
2601 2601 f2 = None
2602 2602 if opts.git:
2603 2603 # have we already reported a copy above?
2604 2604 if (f in copyto and copyto[f] in addedset
2605 2605 and copy[copyto[f]] == f):
2606 2606 continue
2607 2607 yield f1, f2, copyop
2608 2608
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, pathfn):
    '''given input data, generate a diff and yield it in blocks

    Yields (fctx1, fctx2, header, hunks) tuples, one per file pair.

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    pathfn is applied to every path in the diff output.
    '''

    def gitindex(text):
        # git-style blob hash used in 'index' header lines
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) 'diff -r ... -r ... file' header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = dateutil.datestr(ctx1.date())
    date2 = dateutil.datestr(ctx2.date())

    # git file modes: symlink, executable, regular
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if not pathfn:
        pathfn = lambda f: f

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # give the caller a chance to upgrade to git format when the
            # plain format would lose information
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = pathfn(f1 or f2)
        path2 = pathfn(f2 or f1)
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2759 2759
def diffstatsum(stats):
    """Aggregate per-file diffstat tuples.

    ``stats`` yields (filename, added, removed, isbinary) tuples; returns
    (maxfile, maxtotal, addtotal, removetotal, binary), where maxfile is
    the widest filename in display columns and binary is True if any
    entry was binary.
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, added, removed, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, added + removed)
        addtotal += added
        removetotal += removed
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2770 2770
def diffstatdata(lines):
    """Parse diff output into (filename, adds, removes, isbinary) tuples.

    One tuple is produced per file header encountered in ``lines``.
    """
    diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def flushresult():
        # record the file collected so far, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            flushresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif not inheader and line.startswith('+'):
            adds += 1
        elif not inheader and line.startswith('-'):
            removes += 1
        elif (line.startswith('GIT binary patch')
              or line.startswith('Binary file')):
            isbinary = True
        elif line.startswith('rename from'):
            filename = line[12:]
        elif line.startswith('rename to'):
            filename += ' => %s' % line[10:]
    flushresult()
    return results
2813 2813
def diffstat(lines, width=80):
    """Render a classic diffstat summary of diff ``lines`` as one string."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2851 2851
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # split the trailing +/- graph off the name/count part
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
@@ -1,888 +1,978 b''
1 1 #testcases obsstore-on obsstore-off
2 2
3 3 $ cat > $TESTTMP/editor.py <<EOF
4 4 > #!"$PYTHON"
5 5 > import os
6 6 > import sys
7 7 > path = os.path.join(os.environ['TESTTMP'], 'messages')
8 8 > messages = open(path).read().split('--\n')
9 9 > prompt = open(sys.argv[1]).read()
10 10 > sys.stdout.write(''.join('EDITOR: %s' % l for l in prompt.splitlines(True)))
11 11 > sys.stdout.flush()
12 12 > with open(sys.argv[1], 'w') as f:
13 13 > f.write(messages[0])
14 14 > with open(path, 'w') as f:
15 15 > f.write('--\n'.join(messages[1:]))
16 16 > EOF
17 17
18 18 $ cat >> $HGRCPATH <<EOF
19 19 > [extensions]
20 20 > drawdag=$TESTDIR/drawdag.py
21 21 > split=
22 22 > [ui]
23 23 > interactive=1
24 24 > color=no
25 25 > paginate=never
26 26 > [diff]
27 27 > git=1
28 28 > unified=0
29 29 > [commands]
30 30 > commit.interactive.unified=0
31 31 > [alias]
32 32 > glog=log -G -T '{rev}:{node|short} {desc} {bookmarks}\n'
33 33 > EOF
34 34
35 35 #if obsstore-on
36 36 $ cat >> $HGRCPATH <<EOF
37 37 > [experimental]
38 38 > evolution=all
39 39 > EOF
40 40 #endif
41 41
42 42 $ hg init a
43 43 $ cd a
44 44
45 45 Nothing to split
46 46
47 47 $ hg split
48 48 nothing to split
49 49 [1]
50 50
51 51 $ hg commit -m empty --config ui.allowemptycommit=1
52 52 $ hg split
53 53 abort: cannot split an empty revision
54 54 [255]
55 55
56 56 $ rm -rf .hg
57 57 $ hg init
58 58
59 59 Cannot split working directory
60 60
61 61 $ hg split -r 'wdir()'
62 62 abort: cannot split working directory
63 63 [255]
64 64
65 65 Generate some content. The sed filter drops CR on Windows, which is dropped in
66 66 the a > b line.
67 67
68 68 $ $TESTDIR/seq.py 1 5 | sed 's/\r$//' >> a
69 69 $ hg ci -m a1 -A a -q
70 70 $ hg bookmark -i r1
71 71 $ sed 's/1/11/;s/3/33/;s/5/55/' a > b
72 72 $ mv b a
73 73 $ hg ci -m a2 -q
74 74 $ hg bookmark -i r2
75 75
76 76 Cannot split a public changeset
77 77
78 78 $ hg phase --public -r 'all()'
79 79 $ hg split .
80 80 abort: cannot split public changeset
81 81 (see 'hg help phases' for details)
82 82 [255]
83 83
84 84 $ hg phase --draft -f -r 'all()'
85 85
86 86 Cannot split while working directory is dirty
87 87
88 88 $ touch dirty
89 89 $ hg add dirty
90 90 $ hg split .
91 91 abort: uncommitted changes
92 92 [255]
93 93 $ hg forget dirty
94 94 $ rm dirty
95 95
96 96 Make a clean directory for future tests to build off of
97 97
98 98 $ cp -R . ../clean
99 99
100 100 Split a head
101 101
102 102 $ hg bookmark r3
103 103
104 104 $ hg split 'all()'
105 105 abort: cannot split multiple revisions
106 106 [255]
107 107
108 108 This function splits a bit strangely primarily to avoid changing the behavior of
109 109 the test after a bug was fixed with how split/commit --interactive handled
110 110 `commands.commit.interactive.unified=0`: when there were no context lines,
111 111 it kept only the last diff hunk. When running split, this meant that runsplit
112 112 was always recording three commits, one for each diff hunk, in reverse order
113 113 (the base commit was the last diff hunk in the file).
114 114 $ runsplit() {
115 115 > cat > $TESTTMP/messages <<EOF
116 116 > split 1
117 117 > --
118 118 > split 2
119 119 > --
120 120 > split 3
121 121 > EOF
122 122 > cat <<EOF | hg split "$@"
123 123 > y
124 124 > n
125 125 > n
126 126 > y
127 127 > y
128 128 > n
129 129 > y
130 130 > y
131 131 > y
132 132 > EOF
133 133 > }
134 134
135 135 $ HGEDITOR=false runsplit
136 136 diff --git a/a b/a
137 137 3 hunks, 3 lines changed
138 138 examine changes to 'a'?
139 139 (enter ? for help) [Ynesfdaq?] y
140 140
141 141 @@ -1,1 +1,1 @@
142 142 -1
143 143 +11
144 144 record change 1/3 to 'a'?
145 145 (enter ? for help) [Ynesfdaq?] n
146 146
147 147 @@ -3,1 +3,1 @@ 2
148 148 -3
149 149 +33
150 150 record change 2/3 to 'a'?
151 151 (enter ? for help) [Ynesfdaq?] n
152 152
153 153 @@ -5,1 +5,1 @@ 4
154 154 -5
155 155 +55
156 156 record change 3/3 to 'a'?
157 157 (enter ? for help) [Ynesfdaq?] y
158 158
159 159 transaction abort!
160 160 rollback completed
161 161 abort: edit failed: false exited with status 1
162 162 [255]
163 163 $ hg status
164 164
165 165 $ HGEDITOR="\"$PYTHON\" $TESTTMP/editor.py"
166 166 $ runsplit
167 167 diff --git a/a b/a
168 168 3 hunks, 3 lines changed
169 169 examine changes to 'a'?
170 170 (enter ? for help) [Ynesfdaq?] y
171 171
172 172 @@ -1,1 +1,1 @@
173 173 -1
174 174 +11
175 175 record change 1/3 to 'a'?
176 176 (enter ? for help) [Ynesfdaq?] n
177 177
178 178 @@ -3,1 +3,1 @@ 2
179 179 -3
180 180 +33
181 181 record change 2/3 to 'a'?
182 182 (enter ? for help) [Ynesfdaq?] n
183 183
184 184 @@ -5,1 +5,1 @@ 4
185 185 -5
186 186 +55
187 187 record change 3/3 to 'a'?
188 188 (enter ? for help) [Ynesfdaq?] y
189 189
190 190 EDITOR: HG: Splitting 1df0d5c5a3ab. Write commit message for the first split changeset.
191 191 EDITOR: a2
192 192 EDITOR:
193 193 EDITOR:
194 194 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
195 195 EDITOR: HG: Leave message empty to abort commit.
196 196 EDITOR: HG: --
197 197 EDITOR: HG: user: test
198 198 EDITOR: HG: branch 'default'
199 199 EDITOR: HG: changed a
200 200 created new head
201 201 diff --git a/a b/a
202 202 2 hunks, 2 lines changed
203 203 examine changes to 'a'?
204 204 (enter ? for help) [Ynesfdaq?] y
205 205
206 206 @@ -1,1 +1,1 @@
207 207 -1
208 208 +11
209 209 record change 1/2 to 'a'?
210 210 (enter ? for help) [Ynesfdaq?] n
211 211
212 212 @@ -3,1 +3,1 @@ 2
213 213 -3
214 214 +33
215 215 record change 2/2 to 'a'?
216 216 (enter ? for help) [Ynesfdaq?] y
217 217
218 218 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
219 219 EDITOR: HG: - e704349bd21b: split 1
220 220 EDITOR: HG: Write commit message for the next split changeset.
221 221 EDITOR: a2
222 222 EDITOR:
223 223 EDITOR:
224 224 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
225 225 EDITOR: HG: Leave message empty to abort commit.
226 226 EDITOR: HG: --
227 227 EDITOR: HG: user: test
228 228 EDITOR: HG: branch 'default'
229 229 EDITOR: HG: changed a
230 230 diff --git a/a b/a
231 231 1 hunks, 1 lines changed
232 232 examine changes to 'a'?
233 233 (enter ? for help) [Ynesfdaq?] y
234 234
235 235 @@ -1,1 +1,1 @@
236 236 -1
237 237 +11
238 238 record this change to 'a'?
239 239 (enter ? for help) [Ynesfdaq?] y
240 240
241 241 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
242 242 EDITOR: HG: - e704349bd21b: split 1
243 243 EDITOR: HG: - a09ad58faae3: split 2
244 244 EDITOR: HG: Write commit message for the next split changeset.
245 245 EDITOR: a2
246 246 EDITOR:
247 247 EDITOR:
248 248 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
249 249 EDITOR: HG: Leave message empty to abort commit.
250 250 EDITOR: HG: --
251 251 EDITOR: HG: user: test
252 252 EDITOR: HG: branch 'default'
253 253 EDITOR: HG: changed a
254 254 saved backup bundle to $TESTTMP/a/.hg/strip-backup/1df0d5c5a3ab-8341b760-split.hg (obsstore-off !)
255 255
256 256 #if obsstore-off
257 257 $ hg bookmark
258 258 r1 0:a61bcde8c529
259 259 r2 3:00eebaf8d2e2
260 260 * r3 3:00eebaf8d2e2
261 261 $ hg glog -p
262 262 @ 3:00eebaf8d2e2 split 3 r2 r3
263 263 | diff --git a/a b/a
264 264 | --- a/a
265 265 | +++ b/a
266 266 | @@ -1,1 +1,1 @@
267 267 | -1
268 268 | +11
269 269 |
270 270 o 2:a09ad58faae3 split 2
271 271 | diff --git a/a b/a
272 272 | --- a/a
273 273 | +++ b/a
274 274 | @@ -3,1 +3,1 @@
275 275 | -3
276 276 | +33
277 277 |
278 278 o 1:e704349bd21b split 1
279 279 | diff --git a/a b/a
280 280 | --- a/a
281 281 | +++ b/a
282 282 | @@ -5,1 +5,1 @@
283 283 | -5
284 284 | +55
285 285 |
286 286 o 0:a61bcde8c529 a1 r1
287 287 diff --git a/a b/a
288 288 new file mode 100644
289 289 --- /dev/null
290 290 +++ b/a
291 291 @@ -0,0 +1,5 @@
292 292 +1
293 293 +2
294 294 +3
295 295 +4
296 296 +5
297 297
298 298 #else
299 299 $ hg bookmark
300 300 r1 0:a61bcde8c529
301 301 r2 4:00eebaf8d2e2
302 302 * r3 4:00eebaf8d2e2
303 303 $ hg glog
304 304 @ 4:00eebaf8d2e2 split 3 r2 r3
305 305 |
306 306 o 3:a09ad58faae3 split 2
307 307 |
308 308 o 2:e704349bd21b split 1
309 309 |
310 310 o 0:a61bcde8c529 a1 r1
311 311
312 312 #endif
313 313
314 314 Split a head while working parent is not that head
315 315
316 316 $ cp -R $TESTTMP/clean $TESTTMP/b
317 317 $ cd $TESTTMP/b
318 318
319 319 $ hg up 0 -q
320 320 $ hg bookmark r3
321 321
322 322 $ runsplit tip >/dev/null
323 323
324 324 #if obsstore-off
325 325 $ hg bookmark
326 326 r1 0:a61bcde8c529
327 327 r2 3:00eebaf8d2e2
328 328 * r3 0:a61bcde8c529
329 329 $ hg glog
330 330 o 3:00eebaf8d2e2 split 3 r2
331 331 |
332 332 o 2:a09ad58faae3 split 2
333 333 |
334 334 o 1:e704349bd21b split 1
335 335 |
336 336 @ 0:a61bcde8c529 a1 r1 r3
337 337
338 338 #else
339 339 $ hg bookmark
340 340 r1 0:a61bcde8c529
341 341 r2 4:00eebaf8d2e2
342 342 * r3 0:a61bcde8c529
343 343 $ hg glog
344 344 o 4:00eebaf8d2e2 split 3 r2
345 345 |
346 346 o 3:a09ad58faae3 split 2
347 347 |
348 348 o 2:e704349bd21b split 1
349 349 |
350 350 @ 0:a61bcde8c529 a1 r1 r3
351 351
352 352 #endif
353 353
354 354 Split a non-head
355 355
356 356 $ cp -R $TESTTMP/clean $TESTTMP/c
357 357 $ cd $TESTTMP/c
358 358 $ echo d > d
359 359 $ hg ci -m d1 -A d
360 360 $ hg bookmark -i d1
361 361 $ echo 2 >> d
362 362 $ hg ci -m d2
363 363 $ echo 3 >> d
364 364 $ hg ci -m d3
365 365 $ hg bookmark -i d3
366 366 $ hg up '.^' -q
367 367 $ hg bookmark d2
368 368 $ cp -R . ../d
369 369
370 370 $ runsplit -r 1 | grep rebasing
371 371 rebasing 2:b5c5ea414030 "d1" (d1)
372 372 rebasing 3:f4a0a8d004cc "d2" (d2)
373 373 rebasing 4:777940761eba "d3" (d3)
374 374 #if obsstore-off
375 375 $ hg bookmark
376 376 d1 4:c4b449ef030e
377 377 * d2 5:c9dd00ab36a3
378 378 d3 6:19f476bc865c
379 379 r1 0:a61bcde8c529
380 380 r2 3:00eebaf8d2e2
381 381 $ hg glog -p
382 382 o 6:19f476bc865c d3 d3
383 383 | diff --git a/d b/d
384 384 | --- a/d
385 385 | +++ b/d
386 386 | @@ -2,0 +3,1 @@
387 387 | +3
388 388 |
389 389 @ 5:c9dd00ab36a3 d2 d2
390 390 | diff --git a/d b/d
391 391 | --- a/d
392 392 | +++ b/d
393 393 | @@ -1,0 +2,1 @@
394 394 | +2
395 395 |
396 396 o 4:c4b449ef030e d1 d1
397 397 | diff --git a/d b/d
398 398 | new file mode 100644
399 399 | --- /dev/null
400 400 | +++ b/d
401 401 | @@ -0,0 +1,1 @@
402 402 | +d
403 403 |
404 404 o 3:00eebaf8d2e2 split 3 r2
405 405 | diff --git a/a b/a
406 406 | --- a/a
407 407 | +++ b/a
408 408 | @@ -1,1 +1,1 @@
409 409 | -1
410 410 | +11
411 411 |
412 412 o 2:a09ad58faae3 split 2
413 413 | diff --git a/a b/a
414 414 | --- a/a
415 415 | +++ b/a
416 416 | @@ -3,1 +3,1 @@
417 417 | -3
418 418 | +33
419 419 |
420 420 o 1:e704349bd21b split 1
421 421 | diff --git a/a b/a
422 422 | --- a/a
423 423 | +++ b/a
424 424 | @@ -5,1 +5,1 @@
425 425 | -5
426 426 | +55
427 427 |
428 428 o 0:a61bcde8c529 a1 r1
429 429 diff --git a/a b/a
430 430 new file mode 100644
431 431 --- /dev/null
432 432 +++ b/a
433 433 @@ -0,0 +1,5 @@
434 434 +1
435 435 +2
436 436 +3
437 437 +4
438 438 +5
439 439
440 440 #else
441 441 $ hg bookmark
442 442 d1 8:c4b449ef030e
443 443 * d2 9:c9dd00ab36a3
444 444 d3 10:19f476bc865c
445 445 r1 0:a61bcde8c529
446 446 r2 7:00eebaf8d2e2
447 447 $ hg glog
448 448 o 10:19f476bc865c d3 d3
449 449 |
450 450 @ 9:c9dd00ab36a3 d2 d2
451 451 |
452 452 o 8:c4b449ef030e d1 d1
453 453 |
454 454 o 7:00eebaf8d2e2 split 3 r2
455 455 |
456 456 o 6:a09ad58faae3 split 2
457 457 |
458 458 o 5:e704349bd21b split 1
459 459 |
460 460 o 0:a61bcde8c529 a1 r1
461 461
462 462 #endif
463 463
464 464 Split a non-head without rebase
465 465
466 466 $ cd $TESTTMP/d
467 467 #if obsstore-off
468 468 $ runsplit -r 1 --no-rebase
469 469 abort: cannot split changeset with children without rebase
470 470 [255]
471 471 #else
472 472 $ runsplit -r 1 --no-rebase >/dev/null
473 473 3 new orphan changesets
474 474 $ hg bookmark
475 475 d1 2:b5c5ea414030
476 476 * d2 3:f4a0a8d004cc
477 477 d3 4:777940761eba
478 478 r1 0:a61bcde8c529
479 479 r2 7:00eebaf8d2e2
480 480
481 481 $ hg glog
482 482 o 7:00eebaf8d2e2 split 3 r2
483 483 |
484 484 o 6:a09ad58faae3 split 2
485 485 |
486 486 o 5:e704349bd21b split 1
487 487 |
488 488 | * 4:777940761eba d3 d3
489 489 | |
490 490 | @ 3:f4a0a8d004cc d2 d2
491 491 | |
492 492 | * 2:b5c5ea414030 d1 d1
493 493 | |
494 494 | x 1:1df0d5c5a3ab a2
495 495 |/
496 496 o 0:a61bcde8c529 a1 r1
497 497
498 498 #endif
499 499
500 500 Split a non-head with obsoleted descendants
501 501
502 502 #if obsstore-on
503 503 $ hg init $TESTTMP/e
504 504 $ cd $TESTTMP/e
505 505 $ hg debugdrawdag <<'EOS'
506 506 > H I J
507 507 > | | |
508 508 > F G1 G2 # amend: G1 -> G2
509 509 > | | / # prune: F
510 510 > C D E
511 511 > \|/
512 512 > B
513 513 > |
514 514 > A
515 515 > EOS
516 516 2 new orphan changesets
517 517 $ eval `hg tags -T '{tag}={node}\n'`
518 518 $ rm .hg/localtags
519 519 $ hg split $B --config experimental.evolution=createmarkers
520 520 abort: split would leave orphaned changesets behind
521 521 [255]
522 522 $ cat > $TESTTMP/messages <<EOF
523 523 > Split B
524 524 > EOF
525 525 $ cat <<EOF | hg split $B
526 526 > y
527 527 > y
528 528 > EOF
529 529 diff --git a/B b/B
530 530 new file mode 100644
531 531 examine changes to 'B'?
532 532 (enter ? for help) [Ynesfdaq?] y
533 533
534 534 @@ -0,0 +1,1 @@
535 535 +B
536 536 \ No newline at end of file
537 537 record this change to 'B'?
538 538 (enter ? for help) [Ynesfdaq?] y
539 539
540 540 EDITOR: HG: Splitting 112478962961. Write commit message for the first split changeset.
541 541 EDITOR: B
542 542 EDITOR:
543 543 EDITOR:
544 544 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
545 545 EDITOR: HG: Leave message empty to abort commit.
546 546 EDITOR: HG: --
547 547 EDITOR: HG: user: test
548 548 EDITOR: HG: branch 'default'
549 549 EDITOR: HG: added B
550 550 created new head
551 551 rebasing 2:26805aba1e60 "C"
552 552 rebasing 3:be0ef73c17ad "D"
553 553 rebasing 4:49cb92066bfd "E"
554 554 rebasing 7:97a6268cc7ef "G2"
555 555 rebasing 10:e2f1e425c0db "J"
556 556 $ hg glog -r 'sort(all(), topo)'
557 557 o 16:556c085f8b52 J
558 558 |
559 559 o 15:8761f6c9123f G2
560 560 |
561 561 o 14:a7aeffe59b65 E
562 562 |
563 563 | o 13:e1e914ede9ab D
564 564 |/
565 565 | o 12:01947e9b98aa C
566 566 |/
567 567 o 11:0947baa74d47 Split B
568 568 |
569 569 | * 9:88ede1d5ee13 I
570 570 | |
571 571 | x 6:af8cbf225b7b G1
572 572 | |
573 573 | x 3:be0ef73c17ad D
574 574 | |
575 575 | | * 8:74863e5b5074 H
576 576 | | |
577 577 | | x 5:ee481a2a1e69 F
578 578 | | |
579 579 | | x 2:26805aba1e60 C
580 580 | |/
581 581 | x 1:112478962961 B
582 582 |/
583 583 o 0:426bada5c675 A
584 584
585 585 #endif
586 586
587 587 Preserve secret phase in split
588 588
589 589 $ cp -R $TESTTMP/clean $TESTTMP/phases1
590 590 $ cd $TESTTMP/phases1
591 591 $ hg phase --secret -fr tip
592 592 $ hg log -T '{short(node)} {phase}\n'
593 593 1df0d5c5a3ab secret
594 594 a61bcde8c529 draft
595 595 $ runsplit tip >/dev/null
596 596 $ hg log -T '{short(node)} {phase}\n'
597 597 00eebaf8d2e2 secret
598 598 a09ad58faae3 secret
599 599 e704349bd21b secret
600 600 a61bcde8c529 draft
601 601
602 602 Do not move things to secret even if phases.new-commit=secret
603 603
604 604 $ cp -R $TESTTMP/clean $TESTTMP/phases2
605 605 $ cd $TESTTMP/phases2
606 606 $ cat >> .hg/hgrc <<EOF
607 607 > [phases]
608 608 > new-commit=secret
609 609 > EOF
610 610 $ hg log -T '{short(node)} {phase}\n'
611 611 1df0d5c5a3ab draft
612 612 a61bcde8c529 draft
613 613 $ runsplit tip >/dev/null
614 614 $ hg log -T '{short(node)} {phase}\n'
615 615 00eebaf8d2e2 draft
616 616 a09ad58faae3 draft
617 617 e704349bd21b draft
618 618 a61bcde8c529 draft
619 619
620 620 `hg split` with ignoreblanklines=1 does not infinite loop
621 621
622 622 $ mkdir $TESTTMP/f
623 623 $ hg init $TESTTMP/f/a
624 624 $ cd $TESTTMP/f/a
625 625 $ printf '1\n2\n3\n4\n5\n' > foo
626 626 $ cp foo bar
627 627 $ hg ci -qAm initial
628 628 $ printf '1\n\n2\n3\ntest\n4\n5\n' > bar
629 629 $ printf '1\n2\n3\ntest\n4\n5\n' > foo
630 630 $ hg ci -qm splitme
631 631 $ cat > $TESTTMP/messages <<EOF
632 632 > split 1
633 633 > --
634 634 > split 2
635 635 > EOF
636 636 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
637 637 diff --git a/bar b/bar
638 638 2 hunks, 2 lines changed
639 639 examine changes to 'bar'?
640 640 (enter ? for help) [Ynesfdaq?] f
641 641
642 642 diff --git a/foo b/foo
643 643 1 hunks, 1 lines changed
644 644 examine changes to 'foo'?
645 645 (enter ? for help) [Ynesfdaq?] n
646 646
647 647 EDITOR: HG: Splitting dd3c45017cbf. Write commit message for the first split changeset.
648 648 EDITOR: splitme
649 649 EDITOR:
650 650 EDITOR:
651 651 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
652 652 EDITOR: HG: Leave message empty to abort commit.
653 653 EDITOR: HG: --
654 654 EDITOR: HG: user: test
655 655 EDITOR: HG: branch 'default'
656 656 EDITOR: HG: changed bar
657 657 created new head
658 658 diff --git a/foo b/foo
659 659 1 hunks, 1 lines changed
660 660 examine changes to 'foo'?
661 661 (enter ? for help) [Ynesfdaq?] f
662 662
663 663 EDITOR: HG: Splitting dd3c45017cbf. So far it has been split into:
664 664 EDITOR: HG: - f205aea1c624: split 1
665 665 EDITOR: HG: Write commit message for the next split changeset.
666 666 EDITOR: splitme
667 667 EDITOR:
668 668 EDITOR:
669 669 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
670 670 EDITOR: HG: Leave message empty to abort commit.
671 671 EDITOR: HG: --
672 672 EDITOR: HG: user: test
673 673 EDITOR: HG: branch 'default'
674 674 EDITOR: HG: changed foo
675 675 saved backup bundle to $TESTTMP/f/a/.hg/strip-backup/dd3c45017cbf-463441b5-split.hg (obsstore-off !)
676 676
677 677 Let's try that again, with a slightly different set of patches, to ensure that
678 678 the ignoreblanklines thing isn't somehow position dependent.
679 679
680 680 $ hg init $TESTTMP/f/b
681 681 $ cd $TESTTMP/f/b
682 682 $ printf '1\n2\n3\n4\n5\n' > foo
683 683 $ cp foo bar
684 684 $ hg ci -qAm initial
685 685 $ printf '1\n2\n3\ntest\n4\n5\n' > bar
686 686 $ printf '1\n2\n3\ntest\n4\n\n5\n' > foo
687 687 $ hg ci -qm splitme
688 688 $ cat > $TESTTMP/messages <<EOF
689 689 > split 1
690 690 > --
691 691 > split 2
692 692 > EOF
693 693 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
694 694 diff --git a/bar b/bar
695 695 1 hunks, 1 lines changed
696 696 examine changes to 'bar'?
697 697 (enter ? for help) [Ynesfdaq?] f
698 698
699 699 diff --git a/foo b/foo
700 700 2 hunks, 2 lines changed
701 701 examine changes to 'foo'?
702 702 (enter ? for help) [Ynesfdaq?] n
703 703
704 704 EDITOR: HG: Splitting 904c80b40a4a. Write commit message for the first split changeset.
705 705 EDITOR: splitme
706 706 EDITOR:
707 707 EDITOR:
708 708 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
709 709 EDITOR: HG: Leave message empty to abort commit.
710 710 EDITOR: HG: --
711 711 EDITOR: HG: user: test
712 712 EDITOR: HG: branch 'default'
713 713 EDITOR: HG: changed bar
714 714 created new head
715 715 diff --git a/foo b/foo
716 716 2 hunks, 2 lines changed
717 717 examine changes to 'foo'?
718 718 (enter ? for help) [Ynesfdaq?] f
719 719
720 720 EDITOR: HG: Splitting 904c80b40a4a. So far it has been split into:
721 721 EDITOR: HG: - ffecf40fa954: split 1
722 722 EDITOR: HG: Write commit message for the next split changeset.
723 723 EDITOR: splitme
724 724 EDITOR:
725 725 EDITOR:
726 726 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
727 727 EDITOR: HG: Leave message empty to abort commit.
728 728 EDITOR: HG: --
729 729 EDITOR: HG: user: test
730 730 EDITOR: HG: branch 'default'
731 731 EDITOR: HG: changed foo
732 732 saved backup bundle to $TESTTMP/f/b/.hg/strip-backup/904c80b40a4a-47fb907f-split.hg (obsstore-off !)
733 733
734 734
735 735 Testing the case in split when committing flag-only file changes (issue5864)
736 736 ---------------------------------------------------------------------------
737 737 $ hg init $TESTTMP/issue5864
738 738 $ cd $TESTTMP/issue5864
739 739 $ echo foo > foo
740 740 $ hg add foo
741 741 $ hg ci -m "initial"
742 742 $ hg import -q --bypass -m "make executable" - <<EOF
743 743 > diff --git a/foo b/foo
744 744 > old mode 100644
745 745 > new mode 100755
746 746 > EOF
747 747 $ hg up -q
748 748
749 749 $ hg glog
750 750 @ 1:3a2125f0f4cb make executable
751 751 |
752 752 o 0:51f273a58d82 initial
753 753
754 754
755 755 #if no-windows
756 756 $ cat > $TESTTMP/messages <<EOF
757 757 > split 1
758 758 > EOF
759 759 $ printf 'y\n' | hg split
760 760 diff --git a/foo b/foo
761 761 old mode 100644
762 762 new mode 100755
763 763 examine changes to 'foo'?
764 764 (enter ? for help) [Ynesfdaq?] y
765 765
766 766 EDITOR: HG: Splitting 3a2125f0f4cb. Write commit message for the first split changeset.
767 767 EDITOR: make executable
768 768 EDITOR:
769 769 EDITOR:
770 770 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
771 771 EDITOR: HG: Leave message empty to abort commit.
772 772 EDITOR: HG: --
773 773 EDITOR: HG: user: test
774 774 EDITOR: HG: branch 'default'
775 775 EDITOR: HG: changed foo
776 776 created new head
777 777 saved backup bundle to $TESTTMP/issue5864/.hg/strip-backup/3a2125f0f4cb-629e4432-split.hg (obsstore-off !)
778 778
779 779 $ hg log -G -T "{node|short} {desc}\n"
780 780 @ b154670c87da split 1
781 781 |
782 782 o 51f273a58d82 initial
783 783
784 784 #else
785 785
786 786 TODO: Fix this on Windows. See issue 2020 and 5883
787 787
788 788 $ printf 'y\ny\ny\n' | hg split
789 789 abort: cannot split an empty revision
790 790 [255]
791 791 #endif
792 792
793 Test that splitting moves works properly (issue5723)
794 ----------------------------------------------------
795
796 $ hg init $TESTTMP/issue5723-mv
797 $ cd $TESTTMP/issue5723-mv
798 $ printf '1\n2\n' > file
799 $ hg ci -qAm initial
800 $ hg mv file file2
801 $ printf 'a\nb\n1\n2\n3\n4\n' > file2
802 $ cat > $TESTTMP/messages <<EOF
803 > split1, keeping only the numbered lines
804 > --
805 > split2, keeping the lettered lines
806 > EOF
807 $ hg ci -m 'move and modify'
808 $ printf 'y\nn\na\na\n' | hg split
809 diff --git a/file b/file2
810 rename from file
811 rename to file2
812 2 hunks, 4 lines changed
813 examine changes to 'file' and 'file2'?
814 (enter ? for help) [Ynesfdaq?] y
815
816 @@ -0,0 +1,2 @@
817 +a
818 +b
819 record change 1/2 to 'file2'?
820 (enter ? for help) [Ynesfdaq?] n
821
822 @@ -2,0 +5,2 @@ 2
823 +3
824 +4
825 record change 2/2 to 'file2'?
826 (enter ? for help) [Ynesfdaq?] a
827
828 EDITOR: HG: Splitting 8c42fa635116. Write commit message for the first split changeset.
829 EDITOR: move and modify
830 EDITOR:
831 EDITOR:
832 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
833 EDITOR: HG: Leave message empty to abort commit.
834 EDITOR: HG: --
835 EDITOR: HG: user: test
836 EDITOR: HG: branch 'default'
837 EDITOR: HG: added file2
838 EDITOR: HG: removed file
839 created new head
840 diff --git a/file2 b/file2
841 1 hunks, 2 lines changed
842 examine changes to 'file2'?
843 (enter ? for help) [Ynesfdaq?] a
844
845 EDITOR: HG: Splitting 8c42fa635116. So far it has been split into:
846 EDITOR: HG: - 478be2a70c27: split1, keeping only the numbered lines
847 EDITOR: HG: Write commit message for the next split changeset.
848 EDITOR: move and modify
849 EDITOR:
850 EDITOR:
851 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
852 EDITOR: HG: Leave message empty to abort commit.
853 EDITOR: HG: --
854 EDITOR: HG: user: test
855 EDITOR: HG: branch 'default'
856 EDITOR: HG: changed file2
857 saved backup bundle to $TESTTMP/issue5723-mv/.hg/strip-backup/8c42fa635116-a38044d4-split.hg (obsstore-off !)
858 $ hg log -T '{desc}: {files%"{file} "}\n'
859 split2, keeping the lettered lines: file2
860 split1, keeping only the numbered lines: file file2
861 initial: file
862 $ cat file2
863 a
864 b
865 1
866 2
867 3
868 4
869 $ hg cat -r ".^" file2
870 1
871 2
872 3
873 4
874 $ hg cat -r . file2
875 a
876 b
877 1
878 2
879 3
880 4
881
882
793 883 Test that splitting copies works properly (issue5723)
794 884 ----------------------------------------------------
795 885
796 886 $ hg init $TESTTMP/issue5723-cp
797 887 $ cd $TESTTMP/issue5723-cp
798 888 $ printf '1\n2\n' > file
799 889 $ hg ci -qAm initial
800 890 $ hg cp file file2
801 891 $ printf 'a\nb\n1\n2\n3\n4\n' > file2
802 892 Also modify 'file' to prove that the changes aren't being pulled in
803 893 accidentally.
804 894 $ printf 'this is the new contents of "file"' > file
805 895 $ cat > $TESTTMP/messages <<EOF
806 896 > split1, keeping "file" and only the numbered lines in file2
807 897 > --
808 898 > split2, keeping the lettered lines in file2
809 899 > EOF
810 900 $ hg ci -m 'copy file->file2, modify both'
811 901 $ printf 'f\ny\nn\na\na\n' | hg split
812 902 diff --git a/file b/file
813 903 1 hunks, 2 lines changed
814 904 examine changes to 'file'?
815 905 (enter ? for help) [Ynesfdaq?] f
816 906
817 907 diff --git a/file b/file2
818 908 copy from file
819 909 copy to file2
820 910 2 hunks, 4 lines changed
821 911 examine changes to 'file' and 'file2'?
822 912 (enter ? for help) [Ynesfdaq?] y
823 913
824 914 @@ -0,0 +1,2 @@
825 915 +a
826 916 +b
827 917 record change 2/3 to 'file2'?
828 918 (enter ? for help) [Ynesfdaq?] n
829 919
830 920 @@ -2,0 +5,2 @@ 2
831 921 +3
832 922 +4
833 923 record change 3/3 to 'file2'?
834 924 (enter ? for help) [Ynesfdaq?] a
835 925
836 926 EDITOR: HG: Splitting 41c861dfa61e. Write commit message for the first split changeset.
837 927 EDITOR: copy file->file2, modify both
838 928 EDITOR:
839 929 EDITOR:
840 930 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
841 931 EDITOR: HG: Leave message empty to abort commit.
842 932 EDITOR: HG: --
843 933 EDITOR: HG: user: test
844 934 EDITOR: HG: branch 'default'
845 935 EDITOR: HG: added file2
846 936 EDITOR: HG: changed file
847 937 created new head
848 938 diff --git a/file2 b/file2
849 939 1 hunks, 2 lines changed
850 940 examine changes to 'file2'?
851 941 (enter ? for help) [Ynesfdaq?] a
852 942
853 943 EDITOR: HG: Splitting 41c861dfa61e. So far it has been split into:
854 944 EDITOR: HG: - 4b19e06610eb: split1, keeping "file" and only the numbered lines in file2
855 945 EDITOR: HG: Write commit message for the next split changeset.
856 946 EDITOR: copy file->file2, modify both
857 947 EDITOR:
858 948 EDITOR:
859 949 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
860 950 EDITOR: HG: Leave message empty to abort commit.
861 951 EDITOR: HG: --
862 952 EDITOR: HG: user: test
863 953 EDITOR: HG: branch 'default'
864 954 EDITOR: HG: changed file2
865 955 saved backup bundle to $TESTTMP/issue5723-cp/.hg/strip-backup/41c861dfa61e-467e8d3c-split.hg (obsstore-off !)
866 956 $ hg log -T '{desc}: {files%"{file} "}\n'
867 957 split2, keeping the lettered lines in file2: file2
868 958 split1, keeping "file" and only the numbered lines in file2: file file2
869 959 initial: file
870 960 $ cat file2
871 961 a
872 962 b
873 963 1
874 964 2
875 965 3
876 966 4
877 967 $ hg cat -r ".^" file2
878 968 1
879 969 2
880 970 3
881 971 4
882 972 $ hg cat -r . file2
883 973 a
884 974 b
885 975 1
886 976 2
887 977 3
888 978 4
General Comments 0
You need to be logged in to leave comments. Login now