##// END OF EJS Templates
diff: pass a diff hunks filter function from changeset_printer to patch.diff()...
Denis Laxalde -
r34857:890afefa default
parent child Browse files
Show More
@@ -1,3876 +1,3881 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import itertools
12 12 import os
13 13 import re
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 dirstateguard,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 match as matchmod,
35 35 obsolete,
36 36 patch,
37 37 pathutil,
38 38 pycompat,
39 39 registrar,
40 40 revlog,
41 41 revset,
42 42 scmutil,
43 43 smartset,
44 44 templatekw,
45 45 templater,
46 46 util,
47 47 vfs as vfsmod,
48 48 )
stringio = util.stringio

# templates of common command options
# each entry is (short flag, long flag, default, help text[, value name])

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
163 163
def ishunk(x):
    """Return True when *x* is a record/crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
167 167
def newandmodified(chunks, originalchunks):
    """Return the set of filenames for hunks in *chunks* that create a
    new file and were not part of *originalchunks*."""
    result = set()
    for c in chunks:
        if (ishunk(c) and c.header.isnewfile()
                and c not in originalchunks):
            result.add(c.header.filename())
    return result
175 175
def parsealiases(cmd):
    """Split a command table key like "^log|history" into its aliases."""
    stripped = cmd.lstrip("^")
    return stripped.split("|")
178 178
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it.
    """
    oldwrite = ui.write

    def wrapped(*args, **kwargs):
        label = kwargs.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + l)

    setattr(ui, 'write', wrapped)
    return oldwrite
191 191
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user filter *originalhunks*, via curses when enabled."""
    if not usecurses:
        # plain-text prompt based filtering
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # test mode: drive the chunk selector from a script file
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
204 204
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter *originalhunks*; return (chunks, opts).

    *operation* is used to build ui messages indicating what kind of
    filtering the user is doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile,
                            operation)
    finally:
        # always restore the unwrapped ui.write
        ui.write = oldwrite
221 221
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    # Interactive record driver: let the user select hunks with
    # *filterfn*, then delegate the actual commit to *commitfunc*.
    # *cmdsuggest* names the command suggested when not interactive;
    # *backupall* backs up every changed file, not only selected ones.
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # git-style, no-date, show-function diffs so the parsed hunks
        # carry enough context to be re-applied below
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have files(); other chunk types may not
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # an existing backup dir from a previous run is fine
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # assemble the selected hunks into one patch
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; leftovers are harmless
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # take the working-directory lock around the whole record run
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
404 404
405 405
# extracted at module level as it's required each time a file will be added
# to dirnode class object below
# (the OS-specific path separator, as provided by pycompat)
pathsep = pycompat.ossep
409 409
class dirnode(object):
    """
    Represent a directory in the user working copy, with the information
    required to terse its status.

    path is the path to the directory.

    statuses is the set of statuses of all files in this directory
    (including files in all the subdirectories).

    files is the list of files which are direct children of this
    directory.

    subdirs maps each sub-directory name to its own dirnode object.
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record *filename* as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file somewhere below this directory.

        When *filename* contains a path separator we recurse into the
        dirnode of its first path component (creating it if needed);
        otherwise the file is stored as a direct child.
        """
        if pathsep not in filename:
            self._addfileindir(filename, status)
        else:
            subdir, remainder = filename.split(pathsep, 1)

            # create the child dirnode on first sight
            if subdir not in self.subdirs:
                subdirpath = os.path.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            self.subdirs[subdir].addfile(remainder, status)

        # sets dedupe, so an unconditional add is equivalent
        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for fname, st in self.files:
            yield st, os.path.join(self.path, fname)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of status abbreviations passed by the
        user with the `--terse` flag.

        Either every file below this directory shares one status and the
        user asked to terse that status -- then a single
        (status, dirpath) pair is yielded -- or the direct files are
        yielded one by one and the walk recurses into each subdirectory.
        """
        if len(self.statuses) == 1:
            lone = self.statuses.pop()

            # terse only when this status abbreviation was requested
            if lone in terseargs:
                yield lone, self.path + pycompat.ossep
                return

        # direct children first...
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # ...then everything in the subdirectories
        for child in self.subdirs.values():
            for st, fpath in child.tersewalk(terseargs):
                yield st, fpath
509 509
def tersedir(statuslist, terseargs):
    """
    Terse the status when all the files in a directory share that status.

    statuslist is a scmutil.status() object which contains a list of
    files for each status.
    terseargs is the string the user passed as argument to `--terse`.

    Builds a tree of dirnode objects recording, at each node, the
    information needed to decide whether a directory can be tersed, then
    walks that tree to produce the final per-status lists.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # validate the requested abbreviations up front
    for abbrev in terseargs:
        if abbrev not in allst:
            raise error.Abort(_("'%s' not recognized") % abbrev)

    # dirnode object for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        tersedict[attrname[0]] = []
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, attrname[0])

    # the root dir itself is never tersed, so add its direct files
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each sub-directory, tersing where possible
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    # emit the per-status lists, each sorted, in canonical order
    return [sorted(tersedict[st]) for st in allst]
557 557
558 558 def _commentlines(raw):
559 559 '''Surround lineswith a comment char and a new line'''
560 560 lines = raw.splitlines()
561 561 commentedlines = ['# %s' % line for line in lines]
562 562 return '\n'.join(commentedlines) + '\n'
563 563
def _conflictsmsg(repo):
    """Return a commented message about unresolved merge conflicts, or
    None when no merge is in progress."""
    # avoid merge cycle
    from . import merge as mergemod
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    unresolved = [f for f in mergestate.unresolved() if m(f)]
    if not unresolved:
        msg = _('No unresolved merge conflicts.')
    else:
        cwd = pycompat.getcwd()
        entries = [' %s' % os.path.relpath(os.path.join(repo.root, path),
                                           cwd)
                   for path in unresolved]
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % '\n'.join(entries)

    return _commentlines(msg)
587 587
def _helpmessage(continuecmd, abortcmd):
    """Return a commented two-line continue/abort hint."""
    msg = _('To continue: %s\n'
            'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(msg)

def _rebasemsg():
    # hint shown while a rebase is interrupted
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')

def _histeditmsg():
    # hint shown while a histedit is interrupted
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')

def _unshelvemsg():
    # hint shown while an unshelve is interrupted
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')

def _updatecleanmsg(dest=None):
    """Return the 'hg update --clean' command line used as an abort hint."""
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (dest or '.', warning)

def _graftmsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())

def _mergemsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())

def _bisectmsg():
    """Return the commented hint shown while a bisection is in progress."""
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)
619 619
def fileexistspredicate(filename):
    """Build a predicate testing whether *filename* exists in repo.vfs."""
    def predicate(repo):
        return repo.vfs.exists(filename)
    return predicate
622 622
623 623 def _mergepredicate(repo):
624 624 return len(repo[None].parents()) > 1
625 625
# Unfinished-operation states, checked in order by _getrepostate();
# the first state whose predicate matches wins.
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
    )
639 639
def _getrepostate(repo):
    """Return (state, predicate, msgfn) for the first unfinished state
    detected in *repo*, or None when there is none."""
    # experimental config: commands.status.skipstates
    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, detector, msgfn in STATES:
        if state not in skip and detector(repo):
            return (state, detector, msgfn)
648 648
def morestatus(repo, fm):
    """Write extra information about any unfinished repo state to *fm*."""
    statetuple = _getrepostate(repo)
    if not statetuple:
        return

    label = 'status.morestatus'
    fm.startitem()
    state, statedetectionpredicate, helpfulmsg = statetuple
    statemsg = _('The repository is in an unfinished *%s* state.') % state
    fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
    conmsg = _conflictsmsg(repo)
    if conmsg:
        fm.write('conflictsmsg', '%s\n', conmsg, label=label)
    if helpfulmsg:
        fm.write('helpmsg', '%s\n', helpfulmsg(), label=label)
663 663
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for key in keys:
        aliases = parsealiases(key)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif strict:
            found = None
        else:
            # accept a unique-enough prefix of any alias
            found = next((a for a in aliases if a.startswith(cmd)), None)
        if found is None:
            continue
        isdebug = (aliases[0].startswith("debug")
                   or found.startswith("debug"))
        bucket = debugchoice if isdebug else choice
        bucket[found] = (aliases, table[key])

    # fall back to debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
701 701
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    try:
        return choice[cmd]
    except KeyError:
        pass

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
717 717
def findrepo(p):
    """Walk upwards from *p* looking for a directory containing '.hg'.

    Return that directory, or None if the filesystem root is reached
    without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            return None
        p = parent

    return p
725 725
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    st = repo.status()
    # any of modified/added/removed/deleted makes the workdir dirty
    if any(st[:4]):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)
743 743
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if logfile and not message:
        try:
            if isstdiofilename(logfile):
                # '-' means: read the message from stdin
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))
    return message
762 762
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
779 779
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s, finishdesc=finishdesc,
                                     extramsg=extramsg, editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
810 810
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
824 824
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the %-escapes in output filename pattern *pat*.

    Always available: %% (literal '%') and %b (basename of repo root).
    Available only when the corresponding argument is given:
    %H/%h/%R/%r/%m (node), %N (total), %n (seqno, zero-padded when total
    is also given) and %s/%d/%p (pathname).

    Raises error.Abort when *pat* uses an unknown or unavailable spec.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        # the two original `if node:` checks are merged into one
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                # raw string: '\w' is an invalid escape in a plain literal
                'm': lambda: re.sub(r'[^\w]', '_', str(desc)),
                'r': lambda: str(repo.changelog.rev(node)).zfill(
                    revwidth or 0),
            })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                i += 1
                c = pat[i:i + 1]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
870 870
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        # empty / None patterns mean the standard streams
        return True
    return pat == '-'
874 874
875 875 class _unclosablefile(object):
876 876 def __init__(self, fp):
877 877 self._fp = fp
878 878
879 879 def close(self):
880 880 pass
881 881
882 882 def __iter__(self):
883 883 return iter(self._fp)
884 884
885 885 def __getattr__(self, attr):
886 886 return getattr(self._fp, attr)
887 887
888 888 def __enter__(self):
889 889 return self
890 890
891 891 def __exit__(self, exc_type, exc_value, exc_tb):
892 892 pass
893 893
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the output file described by pattern *pat*.

    A stdio pattern ('-' or empty) maps to ui.fout/ui.fin wrapped so
    that close() is a no-op; otherwise the pattern is expanded with
    makefilename() and opened with *mode* (possibly overridden, then
    switched to append for later writers, via *modemap*).
    """
    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        fp = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(fp)

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first writer truncates, subsequent writers append
            modemap[fn] = 'ab'
    return open(fn, mode)
912 912
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    # exactly one of --changelog/--manifest/--dir/<file> may select the
    # revlog; reject contradictory combinations first
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            # unfiltered so hidden revisions stay accessible
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # fall back to opening the given .i revlog file straight from disk
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
957 957
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or move, when *rename* is True) files in the working directory.

    The last element of *pats* is the destination; the rest are sources.
    Honors the 'after', 'dry_run' and 'force' entries of *opts*.  Warnings
    are emitted for each file that cannot be copied; returns True when at
    least one copy failed (callers turn this into an exit code).
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # Expand one source pattern into (abs, rel, exact) triples,
        # warning about exactly-named files in a bad dirstate state.
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # Copy/move a single file; returns True on failure, None otherwise.
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions (two sources mapping onto one target)
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        # target already present (or already tracked with --after):
        # refuse unless --force was given
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after: only record the copy; the target must already exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename through a temp name so the OS performs the
                    # case-only rename instead of a self-copy
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist at dest;
                    # the strip length with more hits wins
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
1203 1203
## facility to let extensions process additional data into an import patch
# lists of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1224 1224
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (message, node, rejects) tuple; node is None when nothing
    was committed, rejects is True when a --partial apply left rejects.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    # patch.extract found no diff at all: nothing to do
    if not tmpname:
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # normal mode: apply the patch to the working directory
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except error.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            # --bypass: build the changeset in memory without touching
            # the working directory
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except error.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.memctx(repo, (p1.node(), p2.node()),
                                        message,
                                        files=files,
                                        filectxfn=store,
                                        user=user,
                                        date=date,
                                        branch=branch,
                                        editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1394 1394
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1402 1402
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    """Emit one changeset as an "HG changeset patch" through *write*.

    Writes the patch header (user, date, branch, node id, parent(s) and
    any extension-provided header lines), the description, then the diff
    against the first parent (or the second one when *switch_parent* is
    set).  *write* is a callable accepting (chunk, label=...) kwargs.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against p2 instead of p1
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    write("# HG changeset patch\n")
    write("# User %s\n" % ctx.user())
    write("# Date %d %d\n" % ctx.date())
    write("# %s\n" % util.datestr(ctx.date()))
    if branch and branch != 'default':
        write("# Branch %s\n" % branch)
    write("# Node ID %s\n" % hex(node))
    write("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        write("# Parent %s\n" % hex(parents[1]))

    # extension hook: extra "# ..." header lines
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            write('# %s\n' % header)
    write(ctx.description().rstrip())
    write("\n\n")

    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1435 1435
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                            the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()
    '''

    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    # 'write' is rebound per destination: a wrapper around fp, ui.write,
    # or (below, inside the loop) a wrapper around a per-revision file
    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] # Commit always has a first line.
            fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
                             total=total, seqno=seqno, revwidth=revwidth,
                             mode='wb', modemap=filemode)
            dest = fo.name
            def write(s, **kw):
                fo.write(s)
        if not dest.startswith('<'):
            repo.ui.note("%s\n" % dest)
        _exportsingle(
            repo, ctx, match, switch_parent, rev, seqno, write, opts)
        if fo is not None:
            fo.close()
1494 1494
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False, hunksfilterfn=None):
    '''show diff or diffstat.

    The output goes to *fp* when given, otherwise to ui.write.  When
    *stat* is True a diffstat (context forced to 0) is rendered instead
    of the diff itself.  *hunksfilterfn*, when not None, is forwarded to
    patch.diff()/patch.diffui() so callers (e.g. changeset_printer) can
    filter which hunks are emitted.  Subrepo diffs are appended when
    *listsubrepos* is set.
    '''
    # NOTE: the unresolved-diff residue (both old and new call signatures
    # present) has been collapsed to the hunksfilterfn-aware version.
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot,
                            hunksfilterfn=hunksfilterfn)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot,
                                         hunksfilterfn=hunksfilterfn):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1551 1553
1552 1554 def _changesetlabels(ctx):
1553 1555 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1554 1556 if ctx.obsolete():
1555 1557 labels.append('changeset.obsolete')
1556 1558 if ctx.isunstable():
1557 1559 labels.append('changeset.unstable')
1558 1560 for instability in ctx.instabilities():
1559 1561 labels.append('instability.%s' % instability)
1560 1562 return ' '.join(labels)
1561 1563
class changeset_printer(object):
    '''show changeset information when templating not requested.

    NOTE: the unresolved-diff residue in show()/_show()/showpatch()
    (both pre- and post-change lines present) has been collapsed to the
    hunksfilterfn-aware version; the column padding in the i18n'd label
    strings (collapsed by the rendering) is restored to match the
    "i18n: column positioning" comments.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        self.header = {}          # per-rev header text (buffered mode)
        self.hunk = {}            # per-rev buffered output
        self.lastheader = None    # last header written, to avoid repeats
        self.footer = None

    def flush(self, ctx):
        """Write out buffered header/hunk for *ctx*; return 1 if a hunk
        was flushed, 0 otherwise."""
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
             **props):
        """Render *ctx*, buffering the output per-rev when buffered."""
        props = pycompat.byteskwargs(props)
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, hunksfilterfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, hunksfilterfn, props)

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
                          label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset:   %s\n") % scmutil.formatchangeid(ctx),
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase:       %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent:      %s\n") % scmutil.formatchangeid(pctx),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            mrev = self.repo.manifestlog._revlog.rev(mnode)
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest:    %s\n")
                          % scmutil.formatrevnode(self.ui, mrev, mnode),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if ctx.isunstable():
            # i18n: column positioning for "hg log"
            instabilities = ctx.instabilities()
            self.ui.write(_("instability: %s\n") % ', '.join(instabilities),
                          label='log.instability')

        elif ctx.obsolete():
            self._showobsfate(ctx)

        self._exthook(ctx)

        if self.ui.debugflag:
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, util.escapestr(value)),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)

    def _showobsfate(self, ctx):
        # display the obsolescence fate of an obsolete changeset
        obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)

        if obsfate:
            for obsfateline in obsfate:
                # i18n: column positioning for "hg log"
                self.ui.write(_("obsfate:     %s\n") % obsfateline,
                              label='log.obsfate')

    def _exthook(self, ctx):
        '''empty method used by extension as a hook point
        '''

    def showpatch(self, ctx, matchfn, hunksfilterfn=None):
        """Write the diffstat and/or diff for *ctx* (per self.diffopts),
        forwarding *hunksfilterfn* to diffordiffstat()."""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True,
                               hunksfilterfn=hunksfilterfn)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False,
                               hunksfilterfn=hunksfilterfn)
            self.ui.write("\n")
class jsonchangeset(changeset_printer):
    '''format changeset information as a JSON array of objects.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # True until the first changeset is emitted; controls whether we
        # open the JSON array or just append a comma-separated object.
        self._first = True

    def close(self):
        # Close the array; emit an empty one if nothing was ever shown.
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        # NOTE(review): hunksfilterfn is accepted for signature parity with
        # the other displayers but is not applied to the JSON diff output.
        rev = ctx.rev()
        if rev is None:
            # working-directory revision has no number/hash
            jrev = jnode = 'null'
        else:
            jrev = '%d' % rev
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            # quiet mode: only rev and node
            self.ui.write(('\n  "rev": %s') % jrev)
            self.ui.write((',\n  "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n  "rev": %s') % jrev)
        self.ui.write((',\n  "node": %s') % jnode)
        self.ui.write((',\n  "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n  "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n  "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n  "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n  "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n  "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n  "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n  "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n  "manifest": %s') % jmanifestnode)

            self.ui.write((',\n  "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # modified/added/removed relative to the first parent
            files = ctx.p1().status(ctx)
            self.ui.write((',\n  "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n  "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n  "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n  "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n  "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture diffstat text via a buffer so it can be escaped
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n  "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n  "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1849 1854
class changeset_templater(changeset_printer):
    '''format changeset information using a user-supplied template.'''

    # Arguments before "buffered" used to be positional. Consider not
    # adding/removing arguments before "buffered" to not break callers.
    def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
                 buffered=False):
        diffopts = diffopts or {}

        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.t = formatter.loadtemplater(ui, tmplspec,
                                         cache=templatekw.defaulttempl)
        # per-instance counter feeding the {index} template keyword
        self._counter = itertools.count()
        self.cache = {}

        self._tref = tmplspec.ref
        # map of logical part name -> template name to render for it;
        # empty string means "no such part"
        self._parts = {'header': '', 'footer': '',
                       tmplspec.ref: tmplspec.ref,
                       'docheader': '', 'docfooter': '',
                       'separator': ''}
        if tmplspec.mapfile:
            # find correct templates for current mode, for backward
            # compatibility with 'log -v/-q/--debug' using a mapfile
            tmplmodes = [
                (True, ''),
                (self.ui.verbose, '_verbose'),
                (self.ui.quiet, '_quiet'),
                (self.ui.debugflag, '_debug'),
            ]
            # later (more specific) modes override earlier ones
            for mode, postfix in tmplmodes:
                for t in self._parts:
                    cur = t + postfix
                    if mode and cur in self.t:
                        self._parts[t] = cur
        else:
            partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
            m = formatter.templatepartsmap(tmplspec, self.t, partnames)
            self._parts.update(m)

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['index'] = index = next(self._counter)
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache
        props = pycompat.strkwargs(props)

        # write separator, which wouldn't work well with the header part below
        # since there's inherently a conflict between header (across items) and
        # separator (per item)
        if self._parts['separator'] and index > 0:
            self.ui.write(templater.stringify(self.t(self._parts['separator'])))

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                # avoid repeating an identical header for consecutive items
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts[self._tref]
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1937 1942
def logtemplatespec(tmpl, mapfile):
    """Wrap a literal log template or a style map file in a templatespec.

    A map file yields a spec referencing its 'changeset' template;
    otherwise the literal template string is wrapped directly.
    """
    ref = 'changeset' if mapfile else ''
    return formatter.templatespec(ref, tmpl, mapfile if mapfile else None)
1943 1948
def _lookuplogtemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    See formatter.lookuptemplate() for details.
    """
    # ui settings: an explicit template is stronger than a style
    if not tmpl and not style:
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            return logtemplatespec(templater.unquotestring(tmpl), None)
        style = util.expandpath(ui.config('ui', 'style'))

    if not tmpl and style:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            # bare name: resolve against the shipped template path,
            # preferring the "map-cmdline." prefixed variant
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return logtemplatespec(None, mapfile)

    if not tmpl:
        return logtemplatespec(None, None)

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1971 1976
def makelogtemplater(ui, repo, tmpl, buffered=False):
    """Create a changeset_templater from a literal template 'tmpl'"""
    return changeset_templater(ui, repo, logtemplatespec(tmpl, None),
                               buffered=buffered)
1976 1981
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # only build a matcher when file contents will actually be shown
    matcher = None
    if opts.get('patch') or opts.get('stat'):
        matcher = scmutil.matchall(repo)

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matcher, opts, buffered)

    spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
    if spec.ref or spec.tmpl or spec.mapfile:
        return changeset_templater(ui, repo, spec, matcher, opts, buffered)
    return changeset_printer(ui, repo, matcher, opts, buffered)
2002 2007
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    # successors may be empty (pruned changeset); condwrite skips then
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    meta = marker.metadata().copy()
    # the date is rendered separately above, so drop it from metadata
    meta.pop('date', None)
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
2023 2028
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matchall = scmutil.matchall(repo)
    matched = {}

    def prep(ctx, fns):
        # record the date of every changeset hitting the date spec
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, matchall, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
2044 2049
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes: start at ``windowsize``, double each step,
    then repeat the first value >= ``sizelimit`` forever."""
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
2050 2055
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone.

    walkchangerevs() catches this and falls back to the slow changelog
    path (e.g. when a pattern names a directory or a deleted file).
    """
    pass
2053 2058
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None); rename sources found during
        # the walk are appended to `copies` and yielded afterwards
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2150 2155
class _followfilter(object):
    """Stateful predicate deciding whether a rev belongs to the followed
    history relative to the first rev fed to match().

    Works in either direction: for revs after the start rev it tracks
    descendants, for revs before it tracks ancestors. match() must be
    called with revs in a monotonic order away from the start rev.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        # revision set forming the current frontier of the walk
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # restrict to first parents when --follow-first is in effect
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            # first call fixes the reference point
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2188 2193
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # each rev is inspected at most once; drop it from the
                    # candidate set so the answer is cached either way
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare in forward (ascending) order...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then yield in the caller's requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2326 2331
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating revs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    populated = [False]  # mutable flag so the closure can update it (py2)
    pctx = repo['.']

    def filematcher(rev):
        if not populated[0]:
            # lazy one-shot population on first use
            populated[0] = True
            for fn in files:
                fctx = pctx[fn]
                fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
                for c in fctx.ancestors(followfirst=followfirst):
                    fcache.setdefault(c.rev(), set()).add(c.path())
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
2354 2359
2355 2360 def _makenofollowlogfilematcher(repo, pats, opts):
2356 2361 '''hook for extensions to override the filematcher for non-follow cases'''
2357 2362 return None
2358 2363
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # option name -> (revset template, join operator for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # assemble the final revset expression from the accumulated options
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2510 2515
def _logrevs(repo, opts):
    """Return the default revisions for 'hg log' given its options."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if opts.get('follow') or opts.get('follow_first'):
        if repo.dirstate.p1() == nullid:
            # working directory parent is the null revision: nothing to follow
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
2525 2530
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        revs = revset.match(repo.ui, expr)(repo, revs)
    if limit is not None:
        # take only the first `limit` revisions
        revs = smartset.baseset(list(itertools.islice(revs, limit)))

    return revs, expr, filematcher
2556 2561
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        revs = revset.match(repo.ui, expr)(repo, revs)
    if limit is not None:
        # take only the first `limit` revisions
        revs = smartset.baseset(list(itertools.islice(revs, limit)))

    return revs, expr, filematcher
2582 2587
def _graphnodeformatter(ui, displayer):
    """Return a callable rendering the graph node character for a ctx.

    Falls back to the built-in "{graphnode}" keyword when no
    ui.graphnodetemplate is configured.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        # fast path for "{graphnode}"
        return templatekw.showgraphnode

    templ = formatter.maketemplater(ui, templater.unquotestring(spec))
    if isinstance(displayer, changeset_templater):
        # reuse cache of slow templates
        cache = displayer.cache
    else:
        cache = {}
    props = templatekw.keywords.copy()
    props.update({'templ': templ, 'cache': cache})

    def formatnode(repo, ctx):
        # the props dict is reused across calls; per-rev keys are
        # overwritten each time and 'revcache' is reset
        props.update({'ctx': ctx, 'repo': repo, 'ui': repo.ui,
                      'revcache': {}})
        return templ.render(props)

    return formatnode
2603 2608
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None, props=None):
    """Show the changesets of *dag* next to an ASCII revision graph.

    dag yields (rev, type, ctx, parents) tuples. displayer is a
    show_changeset-style object created in buffered mode; edgefn computes
    the graph edges (e.g. graphmod.asciiedges). getrenamed, when given,
    is used to collect copy information per changeset. filematcher, when
    given, maps a revision number to a match object restricting which
    files are detailed. props are extra template keywords passed through
    to the displayer.
    """
    props = props or {}
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for the null/working revisions; skip those
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        edges = edgefn(type, char, state, rev, parents)
        firstedge = next(edges)
        width = firstedge[2]
        displayer.show(ctx, copies=copies, matchfn=revmatchfn,
                       _graphwidth=width, **props)
        # the displayer buffered its output per rev; pop it and interleave
        # it with the graph columns line by line
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        for type, char, width, coldata in itertools.chain([firstedge], edges):
            graphmod.ascii(ui, state, type, char, lines, coldata)
            lines = []
    displayer.close()
2656 2661
def graphlog(ui, repo, pats, opts):
    """Show revision history alongside an ASCII revision graph.

    Parameters are identical to log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    dag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # limit rename lookups to the requested revision range, if any
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, dag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2673 2678
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was supplied."""
    unsupported = ["newest_first"]
    for name in unsupported:
        if opts.get(name):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % name.replace("_", "-"))
2679 2684
def graphrevs(repo, nodes, opts):
    """Return a graphmod node generator over *nodes*, newest first.

    Note: reverses the *nodes* list in place before truncating it to
    the --limit option, when one is given.
    """
    limit = loglimit(opts)
    nodes.reverse()
    if limit is None:
        selected = nodes
    else:
        selected = nodes[:limit]
    return graphmod.nodes(repo, selected)
2686 2691
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by *match* for addition, recursing into
    subrepositories.

    prefix is prepended to paths for subrepo recursion and messages;
    explicitonly restricts the operation to files named exactly by the
    matcher. Returns the list of files that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # remember files the matcher reported as bad while still forwarding
    # them to the original bad callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for case collisions against already-seen names
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2729 2734
def addwebdirpath(repo, serverpath, webconf):
    """Register *repo* (and, recursively, its subrepos) in *webconf*."""
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    # every revision touching .hgsub may reference subrepositories
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2738 2743
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by *match* without deleting them.

    Recurses into subrepositories. explicitonly restricts the operation
    to files named exactly by the matcher. Returns a pair
    (bad, forgot): files that could not be forgotten and files that
    actually were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # remember files the matcher reported as bad while still forwarding
    # them to the original bad callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # explicitly-named paths that are neither tracked nor directories
        # deserve a warning (unless only their case differs)
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2786 2791
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of *ctx* matched by *m* through formatter *fm*.

    fmt is the format string used for the path column; subrepos enables
    recursion into all subrepositories. Returns 0 if at least one file
    was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # for the working copy (rev is None), skip files marked removed
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2816 2821
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule matched files for removal, recursing into subrepos.

    after: record removal of files already deleted from disk instead of
    unlinking them; force: remove even modified/added files; subrepos:
    recurse into all subrepositories. warnings, when given, collects
    warning messages for the caller to print (used by recursive subrepo
    calls); otherwise warnings are printed here at the end. Returns 0 on
    success, 1 when any file was skipped with a warning.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # only the outermost invocation prints accumulated warnings
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        # is f inside one of this repo's subrepositories?
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    # decide which files actually get removed, warning about the rest
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2934 2939
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of files in *ctx* selected by *matcher*.

    Output goes through formatter *basefm*, or to files named via
    *fntemplate* when it is non-empty. prefix is prepended to paths for
    subrepo recursion. Returns 0 when at least one file was written,
    1 otherwise.
    """
    err = 1

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
        with formatter.maybereopen(basefm, filename, opts) as fm:
            data = ctx[path].data()
            if opts.get('decode'):
                # apply decode filters (e.g. EOL conversion)
                data = repo.wwritedata(path, data)
            fm.startitem()
            fm.write('data', '%s', data)
            fm.data(abspath=path, path=matcher.rel(path))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            # sub.cat returns 0 on success
            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path), **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2981 2986
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    if opts.get('date'):
        opts['date'] = util.parsedate(opts.get('date'))
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    else:
        dsguard = None
    with dsguard or util.nullcontextmanager():
        if dsguard and scmutil.addremove(repo, matcher, "", opts) != 0:
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)
3002 3007
def samefile(f, ctx1, ctx2):
    """True if file *f* is identical (content and flags) in both contexts.

    A file absent from both manifests also counts as "same"; a file
    present in only one does not.
    """
    if f not in ctx1.manifest():
        return f not in ctx2.manifest()
    if f not in ctx2.manifest():
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
3014 3019
def amend(ui, repo, old, extra, pats, opts):
    """Amend changeset *old* with the matched working-copy changes.

    extra is merged with the old changeset's and the working context's
    extra dicts; pats/opts select which working-copy files take part in
    the amend. Returns the node of the new changeset (or old's node if
    nothing would change). Raises error.Abort when --addremove fails.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction('amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        user = opts.get('user') or old.user()
        date = opts.get('date') or old.date()

        # Parse the date to allow comparison between date and old.date()
        date = util.parsedate(date)

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            files = set([fn for st in repo.status(base, old)[:3]
                         for fn in st])
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        if (opts.get('addremove')
            and scmutil.addremove(repo, matcher, "", opts)):
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        filestoamend = set(f for f in wctx.files() if matcher(f))

        changes = (len(filestoamend) > 0)
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # note: must call p2() -- the bound method itself is always
            # truthy; the returned changectx is falsy for a null parent
            if old.p2():
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was deleted, it's no longer relevant
            files.update(filestoamend)
            files = [f for f in files if not samefile(f, wctx, base)]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    fctx = wctx[path]

                    # Return None for removed files.
                    if not fctx.exists():
                        return None

                    flags = fctx.flags()
                    mctx = context.memfilectx(repo,
                                              fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copied=copied.get(path))
                    return mctx
                except KeyError:
                    return None
        else:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, 'commit.amend')
        editor = getcommiteditor(editform=editform,
                                 **pycompat.strkwargs(opts))

        if not message:
            editor = getcommiteditor(edit=True, editform=editform)
            message = old.description()

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not changes)
            and newdesc == old.description()
            and user == old.user()
            and date == old.date()
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        if opts.get('secret'):
            commitphase = 'secret'
        else:
            commitphase = old.phase()
        overrides = {('phases', 'new-commit'): commitphase}
        with ui.configoverride(overrides, 'amend'):
            newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get('note'):
            obsmetadata = {'note': opts['note']}
        scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified in
        # the amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3192 3197
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, spawning an editor when it is empty."""
    if not ctx.description():
        return commitforceeditor(repo, ctx, subs, editform=editform,
                                 unchangedmessagedetection=True)
    return ctx.description()
3198 3203
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Invoke the user's editor to obtain a commit message for *ctx*.

    finishdesc, when given, post-processes the edited text. extramsg is
    an extra instruction line shown in the editor scaffolding. editform
    selects the most specific '[committemplate]' entry. Raises
    error.Abort on an empty message, or (with
    unchangedmessagedetection) when the templated text was returned
    unmodified.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # try the most specific '[committemplate]' entry first, e.g.
    # 'changeset.commit.amend' before 'changeset.commit' before 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path, action='commit')
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the scaffolding lines the templates inserted
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
3248 3253
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the '[committemplate]' template *ref* for the commit editor."""
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    templ = changeset_templater(ui, repo, spec, None, {}, False)
    overrides = [(k, templater.unquotestring(v))
                 for k, v in ui.configitems('committemplate')]
    templ.t.cache.update(overrides)

    # ensure that extramsg is string
    extramsg = extramsg or ''

    ui.pushbuffer()
    templ.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3262 3267
def hgprefix(msg):
    """Prefix each non-empty line of *msg* with "HG: ", dropping blanks."""
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: %s" % line)
    return "\n".join(prefixed)
3265 3270
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default plain-text editor scaffolding for committing *ctx*."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    edittext = []
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(hgprefix(_("Enter commit message."
                             " Lines beginning with 'HG:' are removed.")))
    edittext.append(hgprefix(extramsg))
    edittext.append("HG: --")
    edittext.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_("branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    edittext.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    edittext.extend(hgprefix(_("added %s") % f) for f in added)
    edittext.extend(hgprefix(_("changed %s") % f) for f in modified)
    edittext.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not (added or modified or removed):
        edittext.append(hgprefix(_("no files changed")))
    edittext.append("")

    return "\n".join(edittext)
3293 3298
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print user feedback ('created new head', ...) after a commit.

    node is the freshly-committed node, branch its named branch, and
    bheads the branch heads before the commit. Only the 'amend' and
    'close_branch' keys of opts are consulted.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3341 3346
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status matching *pats* after a commit."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
3344 3349
3345 3350 def revert(ui, repo, ctx, parents, *pats, **opts):
3346 3351 parent, p2 = parents
3347 3352 node = ctx.node()
3348 3353
3349 3354 mf = ctx.manifest()
3350 3355 if node == p2:
3351 3356 parent = p2
3352 3357
3353 3358 # need all matching names in dirstate and manifest of target rev,
3354 3359 # so have to walk both. do not print errors if files exist in one
3355 3360 # but not other. in both cases, filesets should be evaluated against
3356 3361 # workingctx to get consistent result (issue4497). this means 'set:**'
3357 3362 # cannot be used to select missing files from target rev.
3358 3363
3359 3364 # `names` is a mapping for all elements in working copy and target revision
3360 3365 # The mapping is in the form:
3361 3366 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3362 3367 names = {}
3363 3368
3364 3369 with repo.wlock():
3365 3370 ## filling of the `names` mapping
3366 3371 # walk dirstate to fill `names`
3367 3372
3368 3373 interactive = opts.get('interactive', False)
3369 3374 wctx = repo[None]
3370 3375 m = scmutil.match(wctx, pats, opts)
3371 3376
3372 3377 # we'll need this later
3373 3378 targetsubs = sorted(s for s in wctx.substate if m(s))
3374 3379
3375 3380 if not m.always():
3376 3381 matcher = matchmod.badmatch(m, lambda x, y: False)
3377 3382 for abs in wctx.walk(matcher):
3378 3383 names[abs] = m.rel(abs), m.exact(abs)
3379 3384
3380 3385 # walk target manifest to fill `names`
3381 3386
3382 3387 def badfn(path, msg):
3383 3388 if path in names:
3384 3389 return
3385 3390 if path in ctx.substate:
3386 3391 return
3387 3392 path_ = path + '/'
3388 3393 for f in names:
3389 3394 if f.startswith(path_):
3390 3395 return
3391 3396 ui.warn("%s: %s\n" % (m.rel(path), msg))
3392 3397
3393 3398 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3394 3399 if abs not in names:
3395 3400 names[abs] = m.rel(abs), m.exact(abs)
3396 3401
3397 3402 # Find status of all file in `names`.
3398 3403 m = scmutil.matchfiles(repo, names)
3399 3404
3400 3405 changes = repo.status(node1=node, match=m,
3401 3406 unknown=True, ignored=True, clean=True)
3402 3407 else:
3403 3408 changes = repo.status(node1=node, match=m)
3404 3409 for kind in changes:
3405 3410 for abs in kind:
3406 3411 names[abs] = m.rel(abs), m.exact(abs)
3407 3412
3408 3413 m = scmutil.matchfiles(repo, names)
3409 3414
3410 3415 modified = set(changes.modified)
3411 3416 added = set(changes.added)
3412 3417 removed = set(changes.removed)
3413 3418 _deleted = set(changes.deleted)
3414 3419 unknown = set(changes.unknown)
3415 3420 unknown.update(changes.ignored)
3416 3421 clean = set(changes.clean)
3417 3422 modadded = set()
3418 3423
3419 3424 # We need to account for the state of the file in the dirstate,
3420 3425 # even when we revert against something else than parent. This will
3421 3426 # slightly alter the behavior of revert (doing back up or not, delete
3422 3427 # or just forget etc).
3423 3428 if parent == node:
3424 3429 dsmodified = modified
3425 3430 dsadded = added
3426 3431 dsremoved = removed
3427 3432 # store all local modifications, useful later for rename detection
3428 3433 localchanges = dsmodified | dsadded
3429 3434 modified, added, removed = set(), set(), set()
3430 3435 else:
3431 3436 changes = repo.status(node1=parent, match=m)
3432 3437 dsmodified = set(changes.modified)
3433 3438 dsadded = set(changes.added)
3434 3439 dsremoved = set(changes.removed)
3435 3440 # store all local modifications, useful later for rename detection
3436 3441 localchanges = dsmodified | dsadded
3437 3442
3438 3443 # only take into account for removes between wc and target
3439 3444 clean |= dsremoved - removed
3440 3445 dsremoved &= removed
3441 3446 # distinct between dirstate remove and other
3442 3447 removed -= dsremoved
3443 3448
3444 3449 modadded = added & dsmodified
3445 3450 added -= modadded
3446 3451
3447 3452 # tell newly modified apart.
3448 3453 dsmodified &= modified
3449 3454 dsmodified |= modified & dsadded # dirstate added may need backup
3450 3455 modified -= dsmodified
3451 3456
3452 3457 # We need to wait for some post-processing to update this set
3453 3458 # before making the distinction. The dirstate will be used for
3454 3459 # that purpose.
3455 3460 dsadded = added
3456 3461
3457 3462 # in case of merge, files that are actually added can be reported as
3458 3463 # modified, we need to post process the result
3459 3464 if p2 != nullid:
3460 3465 mergeadd = set(dsmodified)
3461 3466 for path in dsmodified:
3462 3467 if path in mf:
3463 3468 mergeadd.remove(path)
3464 3469 dsadded |= mergeadd
3465 3470 dsmodified -= mergeadd
3466 3471
3467 3472 # if f is a rename, update `names` to also revert the source
3468 3473 cwd = repo.getcwd()
3469 3474 for f in localchanges:
3470 3475 src = repo.dirstate.copied(f)
3471 3476 # XXX should we check for rename down to target node?
3472 3477 if src and src not in names and repo.dirstate[src] == 'r':
3473 3478 dsremoved.add(src)
3474 3479 names[src] = (repo.pathto(src, cwd), True)
3475 3480
3476 3481 # determine the exact nature of the deleted changesets
3477 3482 deladded = set(_deleted)
3478 3483 for path in _deleted:
3479 3484 if path in mf:
3480 3485 deladded.remove(path)
3481 3486 deleted = _deleted - deladded
3482 3487
3483 3488 # distinguish between file to forget and the other
3484 3489 added = set()
3485 3490 for abs in dsadded:
3486 3491 if repo.dirstate[abs] != 'a':
3487 3492 added.add(abs)
3488 3493 dsadded -= added
3489 3494
3490 3495 for abs in deladded:
3491 3496 if repo.dirstate[abs] == 'a':
3492 3497 dsadded.add(abs)
3493 3498 deladded -= dsadded
3494 3499
3495 3500 # For files marked as removed, we check if an unknown file is present at
3496 3501 # the same path. If a such file exists it may need to be backed up.
3497 3502 # Making the distinction at this stage helps have simpler backup
3498 3503 # logic.
3499 3504 removunk = set()
3500 3505 for abs in removed:
3501 3506 target = repo.wjoin(abs)
3502 3507 if os.path.lexists(target):
3503 3508 removunk.add(abs)
3504 3509 removed -= removunk
3505 3510
3506 3511 dsremovunk = set()
3507 3512 for abs in dsremoved:
3508 3513 target = repo.wjoin(abs)
3509 3514 if os.path.lexists(target):
3510 3515 dsremovunk.add(abs)
3511 3516 dsremoved -= dsremovunk
3512 3517
3513 3518 # action to be actually performed by revert
3514 3519 # (<list of file>, message>) tuple
3515 3520 actions = {'revert': ([], _('reverting %s\n')),
3516 3521 'add': ([], _('adding %s\n')),
3517 3522 'remove': ([], _('removing %s\n')),
3518 3523 'drop': ([], _('removing %s\n')),
3519 3524 'forget': ([], _('forgetting %s\n')),
3520 3525 'undelete': ([], _('undeleting %s\n')),
3521 3526 'noop': (None, _('no changes needed to %s\n')),
3522 3527 'unknown': (None, _('file not managed: %s\n')),
3523 3528 }
3524 3529
3525 3530 # "constant" that convey the backup strategy.
3526 3531 # All set to `discard` if `no-backup` is set do avoid checking
3527 3532 # no_backup lower in the code.
3528 3533 # These values are ordered for comparison purposes
3529 3534 backupinteractive = 3 # do backup if interactively modified
3530 3535 backup = 2 # unconditionally do backup
3531 3536 check = 1 # check if the existing file differs from target
3532 3537 discard = 0 # never do backup
3533 3538 if opts.get('no_backup'):
3534 3539 backupinteractive = backup = check = discard
3535 3540 if interactive:
3536 3541 dsmodifiedbackup = backupinteractive
3537 3542 else:
3538 3543 dsmodifiedbackup = backup
3539 3544 tobackup = set()
3540 3545
3541 3546 backupanddel = actions['remove']
3542 3547 if not opts.get('no_backup'):
3543 3548 backupanddel = actions['drop']
3544 3549
3545 3550 disptable = (
3546 3551 # dispatch table:
3547 3552 # file state
3548 3553 # action
3549 3554 # make backup
3550 3555
3551 3556 ## Sets that results that will change file on disk
3552 3557 # Modified compared to target, no local change
3553 3558 (modified, actions['revert'], discard),
3554 3559 # Modified compared to target, but local file is deleted
3555 3560 (deleted, actions['revert'], discard),
3556 3561 # Modified compared to target, local change
3557 3562 (dsmodified, actions['revert'], dsmodifiedbackup),
3558 3563 # Added since target
3559 3564 (added, actions['remove'], discard),
3560 3565 # Added in working directory
3561 3566 (dsadded, actions['forget'], discard),
3562 3567 # Added since target, have local modification
3563 3568 (modadded, backupanddel, backup),
3564 3569 # Added since target but file is missing in working directory
3565 3570 (deladded, actions['drop'], discard),
3566 3571 # Removed since target, before working copy parent
3567 3572 (removed, actions['add'], discard),
3568 3573 # Same as `removed` but an unknown file exists at the same path
3569 3574 (removunk, actions['add'], check),
3570 3575 # Removed since targe, marked as such in working copy parent
3571 3576 (dsremoved, actions['undelete'], discard),
3572 3577 # Same as `dsremoved` but an unknown file exists at the same path
3573 3578 (dsremovunk, actions['undelete'], check),
3574 3579 ## the following sets does not result in any file changes
3575 3580 # File with no modification
3576 3581 (clean, actions['noop'], discard),
3577 3582 # Existing file, not tracked anywhere
3578 3583 (unknown, actions['unknown'], discard),
3579 3584 )
3580 3585
3581 3586 for abs, (rel, exact) in sorted(names.items()):
3582 3587 # target file to be touch on disk (relative to cwd)
3583 3588 target = repo.wjoin(abs)
3584 3589 # search the entry in the dispatch table.
3585 3590 # if the file is in any of these sets, it was touched in the working
3586 3591 # directory parent and we are sure it needs to be reverted.
3587 3592 for table, (xlist, msg), dobackup in disptable:
3588 3593 if abs not in table:
3589 3594 continue
3590 3595 if xlist is not None:
3591 3596 xlist.append(abs)
3592 3597 if dobackup:
3593 3598 # If in interactive mode, don't automatically create
3594 3599 # .orig files (issue4793)
3595 3600 if dobackup == backupinteractive:
3596 3601 tobackup.add(abs)
3597 3602 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3598 3603 bakname = scmutil.origpath(ui, repo, rel)
3599 3604 ui.note(_('saving current version of %s as %s\n') %
3600 3605 (rel, bakname))
3601 3606 if not opts.get('dry_run'):
3602 3607 if interactive:
3603 3608 util.copyfile(target, bakname)
3604 3609 else:
3605 3610 util.rename(target, bakname)
3606 3611 if ui.verbose or not exact:
3607 3612 if not isinstance(msg, basestring):
3608 3613 msg = msg(abs)
3609 3614 ui.status(msg % rel)
3610 3615 elif exact:
3611 3616 ui.warn(msg % rel)
3612 3617 break
3613 3618
3614 3619 if not opts.get('dry_run'):
3615 3620 needdata = ('revert', 'add', 'undelete')
3616 3621 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3617 3622 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3618 3623
3619 3624 if targetsubs:
3620 3625 # Revert the subrepos on the revert list
3621 3626 for sub in targetsubs:
3622 3627 try:
3623 3628 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3624 3629 except KeyError:
3625 3630 raise error.Abort("subrepository '%s' does not exist in %s!"
3626 3631 % (sub, short(ctx.node())))
3627 3632
def _revertprefetch(repo, ctx, *files):
    """Let extensions changing the storage layer prefetch content.

    Default implementation is a no-op; extensions (e.g. alternate storage
    backends) may override this to batch-fetch the data of *files* from
    ``ctx`` before revert starts touching the working directory.
    """
3630 3635
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    'parents' is the (parent, p2) pair of the working directory;
    'ctx' is the target changectx being reverted to; 'actions' maps an
    action name ('revert', 'add', 'remove', 'drop', 'forget', 'undelete')
    to a (list-of-files, message) pair; 'tobackup' is the set of files
    whose current content must be saved as .orig before being touched.
    """
    parent, p2 = parents
    node = ctx.node()
    # Files the user declines to touch in interactive mode are collected
    # here; matcher_opts keeps a live reference, so later matchers built
    # from it automatically exclude them.
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision's content (and flags) into the wdir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # best-effort unlink (file may already be gone), then mark removed
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'revert'
            reversehunks = repo.ui.configbool('experimental',
                'revertalternateinteractivemode')
        # Depending on the mode, the diff is computed wdir->target (and
        # its hunks reversed before applying) or target->wdir.
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        # fp.tell() is nonzero iff any chunk content was written
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy/rename metadata for files recreated from the target rev
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3768 3773
class command(registrar.command):
    """Deprecated command decorator: marks registered functions so that
    extensions.py can emit a deprecation warning for extensions still
    using cmdutil.command instead of registrar.command."""
    def _doregister(self, func, name, *args, **kwargs):
        func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
        return super(command, self)._doregister(func, name, *args, **kwargs)
3773 3778
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a 5-tuple:
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3802 3807
def checkunfinished(repo, commit=False):
    '''Abort when a multistep operation (e.g. graft) is in progress.

    Scans the registered unfinished-state marker files and raises
    error.Abort with the matching message and hint when one exists.
    With commit=True, states flagged as allowing commit are tolerated.
    It's probably good to check this right before bailifchanged().
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        tolerated = commit and allowcommit
        if not tolerated and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3813 3818
def clearunfinished(repo):
    '''Abort on non-clearable unfinished operations, then remove the
    clearable state files (as listed in unfinishedstates).
    '''
    # First pass: if any non-clearable state is active, refuse to touch
    # anything at all.
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # Second pass: delete every clearable marker that is present.
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3824 3829
# Pairs of (.hg/ state file, command that continues the operation); used
# by howtocontinue()/checkafterresolved() to suggest the next step.
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3829 3834
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    '''
    template = _("continue: %s")
    # A state file on disk takes precedence: its command is mandatory
    # (warning=True).
    for statefile, cmd in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return template % cmd, True
    # Otherwise a dirty working directory merely suggests committing
    # (warning=False).
    wdirdirty = repo[None].dirty(missing=True, merge=False, branch=False)
    if wdirdirty:
        return template % _("hg commit"), False
    return None, None
3847 3852
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, howtocontinue will yield
    repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3862 3867
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    contmsg, mandatory = howtocontinue(repo)
    # Only surface the continuation command as a hint when it is the
    # mandatory kind (an actual state file, not just a dirty wdir).
    hint = contmsg if mandatory else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,2798 +1,2805 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import copy
13 13 import email
14 14 import errno
15 15 import hashlib
16 16 import os
17 17 import posixpath
18 18 import re
19 19 import shutil
20 20 import tempfile
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 copies,
30 30 encoding,
31 31 error,
32 32 mail,
33 33 mdiff,
34 34 pathutil,
35 35 policy,
36 36 pycompat,
37 37 scmutil,
38 38 similar,
39 39 util,
40 40 vfs as vfsmod,
41 41 )
42 42
# C implementation of the hunk-matching helpers when available, pure
# Python fallback otherwise.
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# Matches a git-style diff header, capturing the a/ and b/ paths.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into alternating runs of tabs and non-tabs.
tabsplitter = re.compile(br'(\t+|[^\t]+)')

# Re-exported so callers can catch patch failures via this module.
PatchError = error.PatchError

# public functions
52 52
def split(stream):
    '''return an iterator of individual patches from a stream

    The stream may contain a single plain patch, a series of "# HG
    changeset patch" patches, an mbox, a MIME message or a patch with
    RFC 2822-style headers; the format is sniffed from the first lines
    and the matching splitter generator is returned.
    '''
    def isheader(line, inheader):
        # Does this line look like an RFC 2822 "Key: value" header (or a
        # continuation of one)?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # materialize accumulated lines as a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers that follow a blank line
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split on mbox "From " separators, recursing into each message
        # (minus its "From " line) to handle nested formats
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # consume the whole stream and let the email parser walk the
        # MIME parts, yielding only patch-like content types
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new run of header lines starts
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # fallback: the whole input is one plain patch
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object to the iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        # Python 3 spells the iterator method __next__; without this
        # alias fiter instances are not usable in a for loop there.
        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
179 179
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key"); extract() uses these to
# turn "# <header> <value>" lines into data[<key>] entries
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
186 186
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # patch body is spooled into a temp file whose name is returned in
    # data['filename']; the caller is responsible for unlinking it
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            # strip a leading "[PATCH ...]" tag and unfold wrapped lines
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # everything before the diff start is commit message,
                # possibly interleaved with "# ..." hg export headers
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # patchbomb-style separator: ignore trailing text
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file on any failure before propagating
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: the temp file is useless, remove it
        os.unlink(tmpname)
    return data
307 307
308 308 class patchmeta(object):
309 309 """Patched file metadata
310 310
311 311 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
312 312 or COPY. 'path' is patched file path. 'oldpath' is set to the
313 313 origin file when 'op' is either COPY or RENAME, None otherwise. If
314 314 file mode is changed, 'mode' is a tuple (islink, isexec) where
315 315 'islink' is True if the file is a symlink and 'isexec' is True if
316 316 the file is executable. Otherwise, 'mode' is None.
317 317 """
318 318 def __init__(self, path):
319 319 self.path = path
320 320 self.oldpath = None
321 321 self.mode = None
322 322 self.op = 'MODIFY'
323 323 self.binary = False
324 324
325 325 def setmode(self, mode):
326 326 islink = mode & 0o20000
327 327 isexec = mode & 0o100
328 328 self.mode = (islink, isexec)
329 329
330 330 def copy(self):
331 331 other = patchmeta(self.path)
332 332 other.oldpath = self.oldpath
333 333 other.mode = self.mode
334 334 other.op = self.op
335 335 other.binary = self.binary
336 336 return other
337 337
338 338 def _ispatchinga(self, afile):
339 339 if afile == '/dev/null':
340 340 return self.op == 'ADD'
341 341 return afile == 'a/' + (self.oldpath or self.path)
342 342
343 343 def _ispatchingb(self, bfile):
344 344 if bfile == '/dev/null':
345 345 return self.op == 'DELETE'
346 346 return bfile == 'b/' + self.path
347 347
348 348 def ispatching(self, afile, bfile):
349 349 return self._ispatchinga(afile) and self._ispatchingb(bfile)
350 350
351 351 def __repr__(self):
352 352 return "<patchmeta %s %r>" % (self.op, self.path)
353 353
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Accumulate one patchmeta per "diff --git" stanza; a header section
    # ends either at the next "diff --git" line or at the "--- " line
    # starting the hunk body.
    current = None
    collected = []
    for rawline in lr:
        line = rawline.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if current:
                    collected.append(current)
                current = patchmeta(m.group(2))
        elif current:
            if line.startswith('--- '):
                collected.append(current)
                current = None
                continue
            if line.startswith('rename from '):
                current.op = 'RENAME'
                current.oldpath = line[12:]
            elif line.startswith('rename to '):
                current.path = line[10:]
            elif line.startswith('copy from '):
                current.op = 'COPY'
                current.oldpath = line[10:]
            elif line.startswith('copy to '):
                current.path = line[8:]
            elif line.startswith('deleted file'):
                current.op = 'DELETE'
            elif line.startswith('new file mode '):
                current.op = 'ADD'
                # last six octal digits carry the file mode
                current.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                current.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                current.binary = True
    if current:
        collected.append(current)

    return collected
397 397
class linereader(object):
    # File-like wrapper that lets callers push lines back into the input
    # stream; pushed-back lines are returned (FIFO) before any further
    # data is read from the underlying file object.
    def __init__(self, fp):
        self.fp = fp
        # pending pushed-back lines
        self.buf = []

    def push(self, line):
        # pushing None is a no-op so callers need not guard EOF markers
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # iterate until readline() returns the empty string (EOF)
        return iter(self.readline, '')
417 417
class abstractbackend(object):
    """Base interface for the targets patches are applied to.

    Concrete backends supply file access for the patching machinery:
    reading current content, writing patched content, deleting files
    and recording reject files.
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file. Optional: the default implementation discards rejects.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
451 451
class fsbackend(abstractbackend):
    """Patch backend reading and writing files through a vfs rooted at
    basedir on the local filesystem."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        # Symlinks are reported as their target string with islink set.
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            # owner-executable bit decides the isexec flag
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing file: (None, None) per the getfile() contract
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only synchronize the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # save rejected hunks next to the target file as <fname>.rej
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
500 500
class workingbackend(fsbackend):
    """fsbackend that additionally keeps the repository dirstate in sync."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files that exist on disk but are unknown to hg
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush copy/remove bookkeeping into the dirstate.

        Returns the sorted list of touched files.
        """
        wctx = self.repo[None]
        touched = set(self.changed)
        for source, dest in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, source, dest)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for path in self.removed:
            if path not in self.repo.dirstate:
                # File was deleted and no longer belongs to the dirstate:
                # it was probably marked added then deleted, so it must not
                # be considered by marktouched().
                touched.discard(path)
        if touched:
            scmutil.marktouched(self.repo, touched, self.similarity)
        return sorted(self.changed)
544 544
class filestore(object):
    """Hybrid store for patched file contents.

    Entries are kept in memory until the total exceeds maxsize bytes
    (default 4 MiB); later entries spill into a temporary directory.
    A negative maxsize keeps everything in memory.
    """

    def __init__(self, maxsize=None):
        self.opener = None       # lazily-created temp-dir opener
        self.files = {}          # fname -> (tempname, mode, copied) on disk
        self.created = 0         # counter for simple on-disk names
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0            # bytes currently held in memory
        self.data = {}           # fname -> (data, mode, copied) in memory

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # sequential numeric names sidestep any filename encoding issues
        fn = str(self.created)
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for fname, or (None, None, None)."""
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener is not None and fname in self.files:
            fn, mode, copied = self.files[fname]
            return self.opener.read(fn), mode, copied
        return None, None, None

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
581 581
class repobackend(abstractbackend):
    """Backend collecting patch results into a filestore, against a context.

    Nothing touches the working directory: reads come from ctx, writes go
    into store, and close() reports the set of affected files.
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: carry the current content over
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # every file the patch touched, one way or the other
        return self.changed | self.removed
623 623
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range markers: "--- start[,end] ---" / "*** start[,end] ***"
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# recognized end-of-line handling modes for applying patches
eolmodes = ['strict', 'crlf', 'lf', 'auto']
628 628
class patchfile(object):
    """Patching state for a single file.

    Loads the current content through a backend (or, for copies, from an
    intermediate store), applies hunks one by one with optional fuzzing,
    and finally writes the result and any rejects back via the backend.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp describes the operation on one file (path, op, mode, oldpath)
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        # oldpath is set when the file is a copy/rename target
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # copies read their base content from the store, not the backend
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}        # line content -> line numbers, for fuzzy search
        self.dirty = 0
        self.offset = 0       # cumulative drift from already-applied hunks
        self.skew = 0         # extra drift discovered while locating hunks
        self.rej = []         # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines through the backend, restoring the requested EOLs."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # emit the "patching file X" line at most once per file
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk; return 0 on clean apply, the fuzz level used on
        a fuzzy apply, or -1 when the hunk is rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace (or delete) the whole content
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush the patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
845 845
class header(object):
    """Patch header: the lines introducing one file's diff, plus its hunks."""

    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        """Write a human-oriented summary of this header to fp."""
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # True when the whole file must be taken or left as one unit
        for h in self.header:
            if self.allhunks_re.match(h):
                return True
        return False

    def files(self):
        """Return the file name(s) this header refers to."""
        match = self.diffgit_re.match(self.header[0])
        if not match:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = match.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % ' '.join(map(repr, self.files()))

    def isnewfile(self):
        for h in self.header:
            if self.newfile_re.match(h):
                return True
        return False

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level: a deleted file, for example, is all-or-nothing — the
        # user takes the deletion or leaves it, never part of it.
        # A newly added file is special only when empty (two header lines,
        # no content); with content it stays selectable hunk by hunk.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        if emptynewfile:
            return True
        return any(self.special_re.match(h) for h in self.header)
917 917
class recordhunk(object):
    """A hunk as used by record/crecord-style chunk selection.

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # limit context to maxcontext lines, returning how many were cut
            if maxcontext is None:
                return 0, lines
            delta = len(lines) - maxcontext
            if delta <= 0:
                return 0, lines
            if reverse:
                return delta, lines[delta:]
            return delta, lines[:maxcontext]

        self.header = header
        trimmedbefore, self.before = trimcontext(before, True)
        # leading context removed above shifts both start lines down
        self.fromline = fromline + trimmedbefore
        self.toline = toline + trimmedbefore
        _trimmedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for h in hunk if h.startswith('+'))
        rem = sum(1 for h in hunk if h.startswith('-'))
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = ['%s%s' % (flip[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            # the no-eol marker is not a real context line
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
996 996
def getmessages():
    """Return the prompt strings used by interactive chunk selection.

    Keys: 'multiple'/'single' map an operation ('discard', 'record',
    'revert') to the per-chunk question; 'help' maps it to the
    promptchoice response string ('$$'-separated choices).
    """
    return {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        },
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        },
        'help': {
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
            'revert': _('[Ynesfdaq?]'
                        '$$ &Yes, revert this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Revert remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Revert &all changes to all remaining files'
                        '$$ &Quit, reverting no changes'
                        '$$ &? (display help)')
        }
    }
1042 1042
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Returns a (chunks, opts) tuple where chunks is the flat list of
    selected headers and hunks and opts is an empty dict here.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # earlier "rest of file"/"rest of everything" answers short-circuit
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff",
                                                      text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        # fixoffset tracks line drift caused by chunks skipped in this file
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # all-or-nothing file: take every hunk without prompting
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    # adjust target line for previously skipped chunks
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the chunk: apply the edited version instead
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # keep only files with at least one selected hunk or a special header
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """One parsed text hunk, read from either unified or context format."""

    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []       # old-side lines, with '-'/' ' prefixes
        self.b = []       # new-side lines, content only (filled by parser)
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            # lr of None builds an empty shell (see getnormalized)
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from linereader lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            # "@@ -start +start @@" means a length of 1
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk and convert it to unified form."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # no-eol marker: strip the newline from the previous line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the new-side line into the unified hunk at the
            # right position relative to the old-side lines
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing "\ No newline at end of file" marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when the parsed line counts match the declared lengths."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to ``fuzz`` context
        lines trimmed; starts are 0-based."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1409 1409
class binhunk(object):
    """A git 'GIT binary patch' hunk (literal or delta payload)."""
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against the previous content
        self.delta = False
        # raw hunk lines, kept for re-emission
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload has been fully decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list.

        `lines` is the current file content, used as the delta base when
        this hunk is a delta.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a 'literal N' / 'delta N' git binary hunk from `lr`.

        The payload is base85-encoded, zlib-compressed data; each payload
        line starts with a length character (A-Z encodes 1-26 decoded
        bytes, a-z encodes 27-52).  Raises PatchError on truncated or
        corrupt data.
        """
        def getline(lr, hunk):
            # read one raw line, remember it for re-emission, and return
            # it without the EOL
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the header announcing the payload kind and decoded size
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first character encodes the decoded length of this line
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1465 1465
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' patch header line.

    The 4-character marker is stripped, and anything after the first tab
    (preferred) or space separator is discarded.
    """
    # --- filename \t|space stuff
    name = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1475 1475
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # headers pass through untouched; anything exposing a reversehunk()
    # method (i.e. an actual hunk) is replaced by its reversed counterpart
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1538 1538
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current position in the old (from) and new (to) file
            self.fromline = 0
            self.toline = 0
            # function/section text following the @@ range, if any
            self.proc = ''
            # header object currently collecting hunks
            self.header = None
            # context lines seen since the last flushed hunk
            self.context = []
            # context lines immediately preceding the pending hunk
            self.before = []
            # added/removed lines of the pending hunk
            self.hunk = []
            # completed header objects, in input order
            self.headers = []

        def addrange(self, limits):
            # record the start positions announced by an '@@ ...' line
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush the pending hunk (if any), using `context` as its
            # trailing context, then remember `context` for the next hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                # advance the line trackers past the emitted hunk
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # start a new pending hunk; preceding context becomes 'before'
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush any pending hunk and open a new file header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk and return all parsed headers
            self.addcontext([])
            return self.headers

        # previous event type -> {next event type -> handler}; events are
        # produced by scanpatch().  A missing entry means the input is
        # malformed (see the KeyError handling below).
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            # handlers are stored as plain functions in the class body, so
            # the parser instance is passed explicitly
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1662 1662
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    for n in range(strip):
        pos = path.find('/', pos)
        if pos == -1:
            # n components were stripped, strip - n remain
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - n, strip, path))
        pos += 1
        # a run of consecutive slashes counts as one separator
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1700 1700
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for the file targeted by a plain (non-git) hunk.

    Guesses the target name from the '---'/'+++' names (afile_orig,
    bfile_orig) and from which of the two files `backend` reports as
    existing, then marks creations as ADD and removals as DELETE.
    Raises PatchError when neither a usable source nor destination can be
    determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a hunk starting at 0 with length 0 against /dev/null means the file
    # is being created (resp. removed)
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer whichever side actually exists in the working copy
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch's own names when neither file exists
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1755 1755
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # first non-matching line belongs to the next event
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            # collect the extended header lines up to '---'/'diff'
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                # no file lines (e.g. metadata-only change): re-queue
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # context lines; '\\' covers the 'No newline at EOF' marker
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1801 1801
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for its copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    try:
        fp = lr.fp
        pos = fp.tell()
    except IOError:
        # the underlying stream is not seekable: buffer the remainder so
        # we can rewind after the metadata scan
        pos = 0
        fp = stringio(lr.fp.read())
    metalr = linereader(fp)
    metalr.push(firstline)
    gitpatches = readgitpatch(metalr)
    # rewind so the caller re-reads the patch body from where it stopped
    fp.seek(pos)
    return gitpatches
1827 1827
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context stays None until the diff flavor is known: False for
    # unified diffs, True for context diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit metadata-only 'file' events for entries skipped over
            # between the previous file and this one
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush remaining git metadata entries (files with no hunks at all,
    # e.g. pure copies/renames or mode changes)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1923 1923
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    `binchunk` is the decoded delta payload; `data` is the source content
    the delta was computed against.  Returns the reconstructed content.
    """
    def deltahead(binchunk):
        # length of the variable-width size header: bytes with the high
        # bit set are continuation bytes, the first byte without it ends
        # the header
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]   # skip source size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]   # skip target size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy command: bits 0-3 select which offset bytes follow,
            # bits 4-6 which size bytes (little-endian assembly below)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # a copy size of zero means 64kB, per the delta format
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert command: the next `cmd` bytes are literal data
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1979 1979
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to _applydiff, using the standard `patchfile` class to
    # patch individual files
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1992 1992
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Apply the patch read from fp through `backend`.

    `patcher` is a callable with the signature of the `patchfile` class
    (see applydiff()); `store` holds file contents needed across copies
    and renames.  Returns 0 on clean apply, 1 on fuzz, -1 on rejects.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip - 1 because git-style paths keep their a/ b/ prefix here
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain patch: synthesize the metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: deletion, copy/rename or mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # report and keep going; remaining hunks for this file
                # will be skipped (current_file is None)
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash copy/rename sources before they get modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2082 2082
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Touched file names are added to `files`; the patcher's own output is
    relayed through ui.note/ui.warn.  Raises PatchError when the patcher
    exits with a non-zero status.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # Initialized up front so that malformed patcher output (a 'with fuzz'
    # or 'FAILED' line before any 'patching file' line) cannot trigger an
    # UnboundLocalError below.
    pf = None
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # record adds/removes and redo rename detection
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             util.explainexit(code)[0])
    return fuzz
2124 2124
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Parse `patchobj` and apply it through `backend`.

    `patchobj` may be a file name or an open file-like object.  Touched
    file names are collected into `files` (a set) when given.  Returns
    True when the patch applied with fuzz, False when it applied cleanly;
    raises PatchError when any hunk was rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    ownedfp = None
    try:
        # EAFP: treat patchobj as a file name first, fall back to
        # using it directly as a file object
        fp = ownedfp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close streams we opened ourselves
        if ownedfp is not None:
            ownedfp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2151 2151
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor.

    `patchobj` may be a file name or a file-like object (see
    patchbackend()); `similarity` is forwarded to the working-directory
    backend.
    """
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2158 2158
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Like internalpatch(), but apply <patchobj> against changectx <ctx>
    through a repobackend built around <store> instead of the working
    directory."""
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2163 2163
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a ui.patch setting selects an external patch program
    externalpatcher = ui.config('ui', 'patch')
    if not externalpatcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, externalpatcher, patchname, strip,
                          files, similarity)
2185 2185
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository-relative file names touched by the
    patch at `patchpath` (rename sources included)."""
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # strip - 1 because git paths keep their a/ b/ prefix
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    # the rename source changes too (it disappears)
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2206 2206
class GitDiffRequired(Exception):
    # NOTE(review): semantics inferred from the name — apparently raised
    # when a change can only be represented in the git extended diff
    # format; confirm against the raisers in the diff-generation code.
    pass
2209 2209
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed

    Convenience wrapper around difffeatureopts() with the git, whitespace
    and formatchanging feature groups all enabled.
    '''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# historical alias for diffallopts
diffopts = diffallopts
2216 2216
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
    with most diff parsers

    Values come from `opts` (command line) when explicitly set, otherwise
    from the `section` of the config, read with `untrusted` semantics.
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2297 2297
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # let the caller drop or rewrite hunks before rendering
            hunks = hunksfilterfn(fctx2, hunks)
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    # default to diffing against the first working-directory parent
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small (20-entry) LRU cache of filelogs keyed by file name, so
        # repeated lookups of the same file avoid re-opening its filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used end of the queue
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    # nothing changed: no hunks to yield
    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        # delegate the actual per-file diffing to trydiff
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # ask the caller whether losing data for fn is acceptable;
                # if not, fall back to git format below
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2445 2452
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # labels for lines that belong to a file header block
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for lines inside a hunk
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # head tracks whether we are currently inside a file-header block
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newline that split() consumed
                yield ('\n', '')
            if head:
                # a hunk marker ('@@ ...') ends the header block
                if line.startswith('@'):
                    head = False
            else:
                # a line that is not context/add/del/hunk starts a new header
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # emit tab runs separately so they get their own label
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: emit the line unlabeled
                yield (line, '')
            if line != stripline:
                # the stripped trailing whitespace, labeled for highlighting
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2499 2506
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()

    Accepts the same arguments as diff(); the output is run through
    difflabel() to attach color/effect labels to each piece.
    '''
    return difflabel(diff, *args, **kw)
2503 2510
2504 2511 def _filepairs(modified, added, removed, copy, opts):
2505 2512 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2506 2513 before and f2 is the the name after. For added files, f1 will be None,
2507 2514 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2508 2515 or 'rename' (the latter two only if opts.git is set).'''
2509 2516 gone = set()
2510 2517
2511 2518 copyto = dict([(v, k) for k, v in copy.items()])
2512 2519
2513 2520 addedset, removedset = set(added), set(removed)
2514 2521
2515 2522 for f in sorted(modified + added + removed):
2516 2523 copyop = None
2517 2524 f1, f2 = f, f
2518 2525 if f in addedset:
2519 2526 f1 = None
2520 2527 if f in copy:
2521 2528 if opts.git:
2522 2529 f1 = copy[f]
2523 2530 if f1 in removedset and f1 not in gone:
2524 2531 copyop = 'rename'
2525 2532 gone.add(f1)
2526 2533 else:
2527 2534 copyop = 'copy'
2528 2535 elif f in removedset:
2529 2536 f2 = None
2530 2537 if opts.git:
2531 2538 # have we already reported a copy above?
2532 2539 if (f in copyto and copyto[f] in addedset
2533 2540 and copy[copyto[f]] == f):
2534 2541 continue
2535 2542 yield f1, f2, copyop
2536 2543
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # sha1 of the content in git blob format, used for 'index' lines
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) 'diff -r REV -r REV file' header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: every path must live under relroot
    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2] if f is not None)

        # non-git format cannot represent these cases; let the caller decide
        # whether to accept the data loss (see diffhunks' losedata wrapper)
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # strip relroot and prepend the display prefix on both paths
        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        # fctx.is | diffopts | what to | is fctx.data()
        # binary() | text nobinary git index | output? | outputted?
        # ------------------------------------|----------------------------
        # yes | no no no * | summary | no
        # yes | no no yes * | base85 | yes
        # yes | no yes no * | summary | no
        # yes | no yes yes 0 | summary | no
        # yes | no yes yes >0 | summary | semi [1]
        # yes | yes * * * | text diff | yes
        # no | * * * * | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            # binary content shown as a base85 'GIT binary patch'
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                # optional abbreviated 'index <hash>..<hash> <mode>' line
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2692 2699
def diffstatsum(stats):
    """Fold per-file (name, adds, removes, isbinary) tuples into totals.

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the widest
    filename display width, the largest per-file adds+removes, the grand
    totals of additions and removals, and whether any file was binary.
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        filetotal = adds + removes
        if filetotal > maxtotal:
            maxtotal = filetotal
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2703 2710
def diffstatdata(lines):
    """Scan diff text lines and collect per-file statistics.

    Returns a list of (filename, adds, removes, isbinary) tuples, one per
    file section encountered in the diff.
    """
    plainre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    fname = None
    adds = removes = 0
    isbinary = False

    def flushfile():
        # record the counters of the file section we just finished
        if fname:
            results.append((fname, adds, removes, isbinary))

    # inheader tracks whether we are in the header portion of a file's
    # diff, so lines starting with '--' or '++' are not miscounted
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            # a new file section begins: flush the previous one and reset
            flushfile()
            inheader = True
            adds = removes = 0
            isbinary = False
            if line.startswith('diff --git a/'):
                fname = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = plainre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    flushfile()
    return results
2742 2749
def diffstat(lines, width=80):
    """Render a diffstat summary (histogram plus totals) for diff lines.

    Returns the formatted report as one string; empty input yields ''.
    """
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # width of the count column; 'Bin' needs at least three characters
    countwidth = len(str(maxtotal))
    if hasbinary:
        countwidth = max(countwidth, 3)
    # room left for the +/- histogram, but never narrower than 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, padding, countwidth, count,
                       '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2780 2787
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # split off the trailing +/- histogram so it can be labeled
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now