##// END OF EJS Templates
commit: add a way to return more information from the chunkselector...
Laurent Charignon -
r27155:8d3c5797 default
parent child Browse files
Show More
@@ -1,3408 +1,3409
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, bin, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import encoding
17 17 import formatter
18 18 import crecord as crecordmod
19 19 import lock as lockmod
20 20
def ishunk(x):
    """Return True if x is a record hunk (curses or plain variety)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
24 24
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose hunks introduce a new file and
    were not present in the original chunk list."""
    files = set()
    for c in chunks:
        if (ishunk(c) and c.header.isnewfile()
                and c not in originalchunks):
            files.add(c.header.filename())
    return files
32 32
def parsealiases(cmd):
    """Split a command-table key such as "^log|history" into its aliases."""
    stripped = cmd.lstrip("^")
    return stripped.split("|")
35 35
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it later."""
    oldwrite = ui.write

    def wrapped(*args, **kwargs):
        label = kwargs.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + l)

    ui.write = wrapped
    return oldwrite
48 48
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Dispatch hunk filtering to the curses UI or the text prompt."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # test mode: drive the curses selector from a script file
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
61 61
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks.

    Returns a (selected hunks, extra options) pair.  *operation* is a
    translated string used for UI purposes to tell the user what kind of
    filtering is going on: reverting, committing, shelving, etc.
    """
    usecurses = ui.configbool('experimental', 'crecord', False)
    testfile = ui.config('experimental', 'crecordtest', None)
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, operation)
    finally:
        # always restore the unwrapped ui.write
        ui.write = oldwrite
    return newchunks, newopts
78 78
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via *commitfunc*.

    *filterfn* is called with (ui, originalchunks) and must return a
    (chunks, newopts) pair; newopts is merged into *opts* before the
    commit.  *cmdsuggest* names the command to suggest when the UI is
    not interactive.  With *backupall*, every changed file is backed up,
    not only those touched by the selection.
    """
    # local import to avoid an import cycle — presumably merge imports
    # cmdutil somewhere; TODO confirm
    import merge as mergemod

    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        merge = len(repo[None].parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        status = repo.status(match=match)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, so we have intending-to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        # fold any extra options returned by the chunk selector into the
        # commit options
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers without files() are skipped
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                shutil.copystat(repo.wjoin(f), tmpname)
                backups[f] = tmpname

            fp = cStringIO.StringIO()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # newly-added-and-modified files have no clean base version;
            # remove them so the revert below yields a clean tree
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                choices = lambda key: key in backups
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, choices)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    util.copyfile(tmpname, repo.wjoin(realname))
                    # Our calls to copystat() here and above are a
                    # hack to trick any editors that have f open that
                    # we haven't modified them.
                    #
                    # Also note that this racy as an editor could
                    # notice the file's mtime before we've finished
                    # writing it.
                    shutil.copystat(tmpname, repo.wjoin(realname))
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup of the backup directory
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # run the record driver while holding the working-dir lock
        wlock = repo.wlock()
        try:
            return recordfunc(ui, repo, message, match, opts)
        finally:
            wlock.release()

    return commit(ui, repo, recordinwlock, pats, opts)
242 243
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry) for each matching command.

    Debug commands (or their aliases) are returned only if no normal
    command matches.  The list of every known alias is returned as well.
    """
    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    choice = {}
    debugchoice = {}
    allcmds = []
    for e in keys:
        aliases = parsealiases(e)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        else:
            found = None
            if not strict:
                # accept any unambiguous prefix of an alias
                for a in aliases:
                    if a.startswith(cmd):
                        found = a
                        break
        if found is None:
            continue
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[e])
        else:
            choice[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
280 281
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # several distinct commands match the prefix: refuse to guess
        raise error.AmbiguousCommand(cmd, sorted(choice.keys()))

    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd, allcmds)
297 298
def findrepo(p):
    """Walk upward from directory p looking for a '.hg' directory.

    Returns the repository root containing p, or None if there is none."""
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # reached the filesystem root without finding a repo
            return None
        p = parent
    return p
305 306
def bailifchanged(repo, merge=True):
    """Abort if the working directory or any subrepo has uncommitted
    changes; with merge=True an uncommitted merge also aborts."""
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    # modified, added, removed or deleted files all count as changes
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'))
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged()
315 316
def logmessage(ui, opts):
    """Get the commit message according to the -m and -l options."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if logfile and not message:
        try:
            if logfile == '-':
                # read the message from stdin
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, inst.strerror))
    return message
334 335
def mergeeditform(ctxorbool, baseformname):
    """Return the appropriate editform name (referencing a committemplate).

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether a merge is being committed.  The result is baseformname with
    '.merge' appended for a merge, '.normal' otherwise.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    suffix = ".merge" if ismerge else ".normal"
    return baseformname + suffix
351 352
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """Return the commit message editor appropriate for '--edit'.

    'finishdesc' is called with the edited commit message (the new
    changeset's description) just after editing, before the empty-ness
    check; its return value is the text actually stored into history,
    which allows changing the description before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line; the 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever
    'finishdesc' or 'extramsg' is given, because those are specific to
    usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forceeditor(r, c, s):
            return commitforceeditor(r, c, s, finishdesc=finishdesc,
                                     extramsg=extramsg, editform=editform)
        return forceeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
382 383
def loglimit(opts):
    """Get the log limit according to option -l/--limit."""
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
396 397
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the format string *pat* into an output filename.

    Supported specifiers (each enabled only when its data is supplied):
      %%  literal '%'            %b  basename of the repo root
      %H  full hex node          %h  short hex node
      %R  changelog revision     %r  zero-padded revision (revwidth)
      %m  desc with non-word characters replaced by '_'
      %N  total patch count      %n  zero-padded sequence number
      %s  basename of pathname   %d  dirname of pathname (or '.')
      %p  pathname

    Raises error.Abort for a specifier whose data was not supplied.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        # the original had two consecutive 'if node:' checks; merged here
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                'm': lambda: re.sub('[^\w]', '_', str(desc)),
                'r': lambda: str(repo.changelog.rev(node)).zfill(
                    revwidth or 0),
            })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total count
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = expander[pat[i]]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
442 443
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file object for *pat*.

    '-' or an empty pat maps to the ui's stdin/stdout; a file-like pat
    is passed through; otherwise pat is expanded via makefilename and
    opened (modemap, when given, overrides and records per-file modes).
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = repo.ui.fout if writable else repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            return os.fdopen(os.dup(fp.fileno()), mode)
        # this fp can't be duped properly, so hand back a dummy object
        # whose close() is a no-op
        class wrappedfileobj(object):
            noop = lambda x: None
            def __init__(self, f):
                self.f = f
            def __getattr__(self, attr):
                if attr == 'close':
                    return self.noop
                return getattr(self.f, attr)
        return wrappedfileobj(fp)

    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first write creates the file; later writers append
            modemap[fn] = 'ab'
    return open(fn, mode)
480 481
def openrevlog(repo, cmd, file_, opts):
    """Open the changelog, the manifest, a filelog or a given revlog."""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']

    # validate the option combination before opening anything
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif (cl or mf) and file_:
        msg = _('cannot specify filename with --changelog or --manifest')
    elif (cl or mf) and not repo:
        msg = _('cannot specify --changelog or --manifest or --dir '
                'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.dirlog(file_)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        # fall back to opening a raw revlog file from disk
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
525 526
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching *pats*.

    The last element of *pats* is the destination; the rest are sources.
    Returns True if any copy failed, so callers can exit non-zero.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, used to detect target collisions
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about unmanaged or removed files named exactly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy/move one file; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # same file up to case folding: only a rename makes sense
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after: only record the copy/move, don't touch the files
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename via a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist in dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
753 754
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With opts['daemon'] set and opts['daemon_pipefds'] unset, the current
    command is respawned detached (with --daemon-pipefds added), the
    parent waits for the child to signal startup by removing a lock file,
    writes the child's pid, and returns (via *parentfn* if given).  In
    the child (opts['daemon_pipefds'] set), the process detaches from the
    session and redirects stdio to os.devnull or *logfile*, then *runfn*
    is called.  A pid file is written when opts['pid_file'] is set,
    appended to when *appendpid* is true.
    '''

    def writepid(pid):
        # record the service pid when --pid-file was requested
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child has started once it removed the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise error.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(os.getpid())

    if opts['daemon_pipefds']:
        # we are the detached child: finish daemonizing
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # setsid is unavailable on this platform (e.g. Windows)
            pass
        # removing the lock file tells the parent we are up
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdio to /dev/null, or stdout/stderr to the log file
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
833 834
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to the actual import function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
854 855
855 856 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
856 857 """Utility function used by commands.import to import a single patch
857 858
858 859 This function is explicitly defined here to help the evolve extension to
859 860 wrap this part of the import logic.
860 861
861 862 The API is currently a bit ugly because it a simple code translation from
862 863 the import command. Feel free to make it better.
863 864
864 865 :hunk: a patch (as a binary string)
865 866 :parents: nodes that will be parent of the created commit
866 867 :opts: the full dict of option passed to the import command
867 868 :msgs: list to save commit message to.
868 869 (used in case we need to save it when failing)
869 870 :updatefunc: a function that update a repo to a given node
870 871 updatefunc(<repo>, <node>)
871 872 """
872 873 # avoid cycle context -> subrepo -> cmdutil
873 874 import context
874 875 extractdata = patch.extract(ui, hunk)
875 876 tmpname = extractdata.get('filename')
876 877 message = extractdata.get('message')
877 878 user = extractdata.get('user')
878 879 date = extractdata.get('date')
879 880 branch = extractdata.get('branch')
880 881 nodeid = extractdata.get('nodeid')
881 882 p1 = extractdata.get('p1')
882 883 p2 = extractdata.get('p2')
883 884
884 885 update = not opts.get('bypass')
885 886 strip = opts["strip"]
886 887 prefix = opts["prefix"]
887 888 sim = float(opts.get('similarity') or 0)
888 889 if not tmpname:
889 890 return (None, None, False)
890 891 msg = _('applied to working directory')
891 892
892 893 rejects = False
893 894
894 895 try:
895 896 cmdline_message = logmessage(ui, opts)
896 897 if cmdline_message:
897 898 # pickup the cmdline msg
898 899 message = cmdline_message
899 900 elif message:
900 901 # pickup the patch msg
901 902 message = message.strip()
902 903 else:
903 904 # launch the editor
904 905 message = None
905 906 ui.debug('message:\n%s\n' % message)
906 907
907 908 if len(parents) == 1:
908 909 parents.append(repo[nullid])
909 910 if opts.get('exact'):
910 911 if not nodeid or not p1:
911 912 raise error.Abort(_('not a Mercurial patch'))
912 913 p1 = repo[p1]
913 914 p2 = repo[p2 or nullid]
914 915 elif p2:
915 916 try:
916 917 p1 = repo[p1]
917 918 p2 = repo[p2]
918 919 # Without any options, consider p2 only if the
919 920 # patch is being applied on top of the recorded
920 921 # first parent.
921 922 if p1 != parents[0]:
922 923 p1 = parents[0]
923 924 p2 = repo[nullid]
924 925 except error.RepoError:
925 926 p1, p2 = parents
926 927 if p2.node() == nullid:
927 928 ui.warn(_("warning: import the patch as a normal revision\n"
928 929 "(use --exact to import the patch as a merge)\n"))
929 930 else:
930 931 p1, p2 = parents
931 932
932 933 n = None
933 934 if update:
934 935 if p1 != parents[0]:
935 936 updatefunc(repo, p1.node())
936 937 if p2 != parents[1]:
937 938 repo.setparents(p1.node(), p2.node())
938 939
939 940 if opts.get('exact') or opts.get('import_branch'):
940 941 repo.dirstate.setbranch(branch or 'default')
941 942
942 943 partial = opts.get('partial', False)
943 944 files = set()
944 945 try:
945 946 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
946 947 files=files, eolmode=None, similarity=sim / 100.0)
947 948 except patch.PatchError as e:
948 949 if not partial:
949 950 raise error.Abort(str(e))
950 951 if partial:
951 952 rejects = True
952 953
953 954 files = list(files)
954 955 if opts.get('no_commit'):
955 956 if message:
956 957 msgs.append(message)
957 958 else:
958 959 if opts.get('exact') or p2:
959 960 # If you got here, you either use --force and know what
960 961 # you are doing or used --exact or a merge patch while
961 962 # being updated to its first parent.
962 963 m = None
963 964 else:
964 965 m = scmutil.matchfiles(repo, files or [])
965 966 editform = mergeeditform(repo[None], 'import.normal')
966 967 if opts.get('exact'):
967 968 editor = None
968 969 else:
969 970 editor = getcommiteditor(editform=editform, **opts)
970 971 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
971 972 extra = {}
972 973 for idfunc in extrapreimport:
973 974 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
974 975 try:
975 976 if partial:
976 977 repo.ui.setconfig('ui', 'allowemptycommit', True)
977 978 n = repo.commit(message, opts.get('user') or user,
978 979 opts.get('date') or date, match=m,
979 980 editor=editor, extra=extra)
980 981 for idfunc in extrapostimport:
981 982 extrapostimportmap[idfunc](repo[n])
982 983 finally:
983 984 repo.ui.restoreconfig(allowemptyback)
984 985 else:
985 986 if opts.get('exact') or opts.get('import_branch'):
986 987 branch = branch or 'default'
987 988 else:
988 989 branch = p1.branch()
989 990 store = patch.filestore()
990 991 try:
991 992 files = set()
992 993 try:
993 994 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
994 995 files, eolmode=None)
995 996 except patch.PatchError as e:
996 997 raise error.Abort(str(e))
997 998 if opts.get('exact'):
998 999 editor = None
999 1000 else:
1000 1001 editor = getcommiteditor(editform='import.bypass')
1001 1002 memctx = context.makememctx(repo, (p1.node(), p2.node()),
1002 1003 message,
1003 1004 opts.get('user') or user,
1004 1005 opts.get('date') or date,
1005 1006 branch, files, store,
1006 1007 editor=editor)
1007 1008 n = memctx.commit()
1008 1009 finally:
1009 1010 store.close()
1010 1011 if opts.get('exact') and opts.get('no_commit'):
1011 1012 # --exact with --no-commit is still useful in that it does merge
1012 1013 # and branch bits
1013 1014 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1014 1015 elif opts.get('exact') and hex(n) != nodeid:
1015 1016 raise error.Abort(_('patch is damaged or loses information'))
1016 1017 if n:
1017 1018 # i18n: refers to a short changeset id
1018 1019 msg = _('created %s') % short(n)
1019 1020 return (msg, n, rejects)
1020 1021 finally:
1021 1022 os.unlink(tmpname)
1022 1023
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1030 1031
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    :repo: the repository the revisions come from
    :revs: revisions to export
    :template: filename template for per-changeset output files; an empty
               template (with no fp) means write to the ui
    :fp: optional file object to write all patches to
    :switch_parent: diff against the second parent instead of the first
    :opts: diff options forwarded to patch.diffui
    :match: matcher restricting which files appear in the diffs
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    # shared across single() calls so each output file keeps a stable mode
    filemode = {}

    def single(rev, seqno, fp):
        # Emit one changeset as a patch (header + description + diff).
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        # with no file object, let the ui handle (possibly labeled) output
        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # let extensions add custom header lines (see extraexport above)
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1096 1097
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    With stat=True a diffstat summary is written instead of the full
    diff. Output goes to fp when given, otherwise to the ui (which can
    label/colorize it). root, when set, restricts and rewrites paths
    relative to that subdirectory. listsubrepos recurses into subrepos.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        # warn about patterns that can never match inside the relative root
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat does not need any context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1154 1155
class changeset_printer(object):
    '''show changeset information when templating not requested.

    Output may be buffered per-changeset (buffered=True) so callers such
    as graph log can interleave hunks; flush() then emits the buffered
    data for a given changeset.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-rev buffered output: rev -> text
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        """Write any buffered output for ctx; return 1 if a hunk was
        written, 0 otherwise."""
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # only repeat a header when it actually changed
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        """Write the footer, if any. Call once after the last changeset."""
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Display ctx, either immediately or into the per-rev buffer."""
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        # full hashes in debug mode, short ones otherwise
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # show modified/added/removed file lists separately in debug mode
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        """Write the diffstat and/or diff for ctx against its first
        parent, depending on the 'stat'/'patch' diff options."""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1324 1325
class jsonchangeset(changeset_printer):
    '''format changeset information.

    Emits changesets as a JSON array, writing the opening/closing
    brackets and inter-object commas incrementally as changesets are
    shown.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # tracks whether the opening "[" still needs to be written
        self._first = True

    def close(self):
        """Terminate the JSON array ("[]" when nothing was shown)."""
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory has no rev/node; represent both as null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n "rev": %s' % jrev)
            self.ui.write(',\n "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n "rev": %s' % jrev)
        self.ui.write(',\n "node": %s' % jnode)
        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n "manifest": %s' % jmanifestnode)

            self.ui.write(',\n "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # modified/added/removed relative to the first parent
            files = ctx.p1().status(ctx)
            self.ui.write(',\n "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            # capture the diff output in a buffer so it can be embedded as
            # a JSON string value
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1422 1423
class changeset_templater(changeset_printer):
    '''format changeset information.

    Renders changesets through the templater, using either an inline
    template (tmpl) or a style map file (mapfile), with per-mode
    variants (verbose/quiet/debug) when the map defines them.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # short node ids unless --debug asked for full hashes
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        if tmpl:
            self.t.cache['changeset'] = tmpl

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # later (more specific) modes override earlier ones
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        """Append the document footer (if any) before the base close()."""
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        try:
            # write header
            if self._parts['header']:
                h = templater.stringify(self.t(self._parts['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = self._parts['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx, matchfn)

            if self._parts['footer']:
                if not self.footer:
                    self.footer = templater.stringify(
                        self.t(self._parts['footer'], **props))
        except KeyError as inst:
            msg = _("%s: no key named '%s'")
            raise error.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError as inst:
            raise error.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1507 1508
def gettemplate(ui, tmpl, style):
    """Resolve a template spec or style name to a (tmpl, mapfile) pair.

    At most one element of the pair is set; (None, None) means nothing
    was configured and the caller should use the default display.
    """
    # an explicit template or style beats the [ui] configuration;
    # templates in turn are stronger than styles
    if not tmpl and not style:
        configured = ui.config('ui', 'logtemplate')
        if configured:
            try:
                configured = templater.unquotestring(configured)
            except SyntaxError:
                pass  # keep the raw configured string
            return configured, None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        # a bare style name refers to a shipped map-cmdline.<name> file
        path = style
        if not os.path.split(path)[0]:
            found = (templater.templatepath('map-cmdline.' + path)
                     or templater.templatepath(path))
            if found:
                path = found
        return None, path

    if not tmpl:
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1538 1539
def show_changeset(ui, repo, opts, buffered=False):
    """Build the displayer for one changeset (template or regular).

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a matcher is only needed when a diff/diffstat will be shown
    if opts.get('patch') or opts.get('stat'):
        matcher = scmutil.matchall(repo)
    else:
        matcher = None

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matcher, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
    if not (tmpl or mapfile):
        return changeset_printer(ui, repo, matcher, opts, buffered)

    try:
        return changeset_templater(ui, repo, matcher, opts, tmpl, mapfile,
                                   buffered)
    except SyntaxError as inst:
        raise error.Abort(inst.args[0])
1569 1570
def showmarker(ui, marker):
    """Print one obsolescence marker in a human-readable form.

    Intended for use by debug commands."""
    ui.write(hex(marker.precnode()))
    for successor in marker.succnodes():
        ui.write(' ')
        ui.write(hex(successor))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # metadata is shown sorted by key; the date is displayed separately above
    metaitems = sorted(marker.metadata().items())
    ui.write('{%s}' % ', '.join('%r: %r' % item for item in metaitems
                                if item[0] != 'date'))
    ui.write('\n')
1587 1588
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematcher = util.matchdate(date)
    matchall = scmutil.matchall(repo)
    matched = {}

    def prep(ctx, fns):
        # remember the date of every changeset satisfying the spec
        when = ctx.date()
        if datematcher(when[0]):
            matched[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, matchall, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise error.Abort(_("revision matching date not found"))
1608 1609
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double each step, capping at sizelimit.

    Once the size reaches (or starts at/above) sizelimit, that value is
    yielded forever.
    """
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1614 1615
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone."""
    pass
1617 1618
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, fncache is populated with rev -> [filenames]
    entries and, when follow is set, copies/renames are chased.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for matched files, then for
        # any copy sources discovered while walking
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
            # XXX insert 1327 fix here
            if flparentlinkrevs:
                ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1714 1715
class _followfilter(object):
    """Stateful filter selecting revisions connected to the first rev seen.

    The first rev passed to match() becomes the start point; subsequent
    revs match if they are descendants (when walking forward) or
    ancestors (when walking backward) of it. match() must be fed revs in
    a monotonic order relative to the start rev for the root tracking to
    be correct.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        # onlyfirst: follow only first parents (--follow-first)
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parents of rev, excluding nullrev; only the first parent
            # when onlyfirst is set
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # the first rev seen defines the start point and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1752 1753
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # Patterns force the slow path; so does --removed combined with exact
    # or prefix matches, since removed files never appear in a filelog walk.
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # evaluate at most once per revision, then cache the answer
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): relies on the 'wanted' container supporting
                # '-' with a list operand; on the slow path 'wanted' is a
                # lazywantedset, which defines no __sub__ -- verify that
                # --prune works in combination with patterns.
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # first pass in forward order: let the caller gather data
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # second pass in the requested order: yield for display
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1890 1891
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "ancestorfiles". It is populated by
    # reproducing the graph traversal already done by the --follow
    # revset and relating linkrevs to file names (which is not
    # "correct" but good enough).
    ancestorfiles = {}
    initialized = [False]
    parentctx = repo['.']

    def _populate():
        for name in files:
            fctx = parentctx[name]
            for chain in ((fctx,), fctx.ancestors(followfirst=followfirst)):
                for ancestor in chain:
                    names = ancestorfiles.setdefault(ancestor.linkrev(), set())
                    names.add(ancestor.path())

    def filematcher(rev):
        if not initialized[0]:
            # fill the cache lazily, on first use only
            initialized[0] = True
            _populate()
        return scmutil.matchfiles(repo, ancestorfiles.get(rev, []))

    return filematcher
1917 1918
1918 1919 def _makenofollowlogfilematcher(repo, pats, opts):
1919 1920 '''hook for extensions to override the filematcher for non-follow cases'''
1920 1921 return None
1921 1922
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Map a log option name to (revset template, operator used to join
    # list-valued options). Keys starting with '_' are synthetic options
    # injected below, never supplied by the user.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    # work on a copy: synthetic '_*' options are added to it below
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = it.next()
    # ascending revs means the user asked to walk towards descendants
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    # synthetic option names indexed by [followfirst] (and, for the
    # ancestors/descendants pair, by [followdescendants] first)
    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Translate every non-empty, recognized option into a revset fragment
    # and 'and' them all together.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2073 2074
def _logrevs(repo, opts):
    """Resolve the revisions a log-like command should visit.

    Default --rev value depends on --follow but --follow behavior
    depends on revisions resolved from --rev...
    """
    following = opts.get('follow') or opts.get('follow_first')
    userrevs = opts.get('rev')
    if userrevs:
        return scmutil.revrange(repo, userrevs)
    if not following:
        allrevs = revset.spanset(repo)
        allrevs.reverse()
        return allrevs
    if repo.dirstate.p1() == nullid:
        # following from an unborn working directory parent: nothing to show
        return revset.baseset()
    return repo.revs('reverse(:.)')
2088 2089
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # honor --limit by truncating after all filtering is done
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2125 2126
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        # When the user gave explicit revs, their order is preserved.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
    if limit is not None:
        # honor --limit by truncating after all filtering is done
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2160 2161
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    """Show a DAG of changesets as an ASCII graph.

    'dag' yields (rev, type, ctx, parents) tuples; 'edgefn' turns each
    entry plus the displayer's text into graph edge data rendered by
    graphmod.ascii. 'getrenamed', when given, is used to compute copy
    information; 'filematcher' narrows which files are detailed per rev.
    """
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # graph glyph: '@' = working dir parent, 'x' = obsolete,
        # '_' = closes its branch, 'o' = everything else
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        # NOTE(review): ctx.rev() is falsy for revision 0, so copies are
        # never computed for it -- confirm this is intentional rather
        # than only meant to skip the working/null revision.
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # pull the buffered text back out of the displayer, line by line
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2191 2192
def graphlog(ui, repo, *pats, **opts):
    """Show revision history alongside an ASCII revision graph.

    Parameters are identical to log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    dag = graphmod.dagwalker(repo, revs)

    renamefn = None
    if opts.get('copies'):
        stoprev = None
        if opts.get('rev'):
            stoprev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        renamefn = templatekw.getrenamedfn(repo, endrev=stoprev)

    displayer = show_changeset(ui, repo, opts, buffered=True)
    parentnodes = [pctx.node() for pctx in repo[None].parents()]
    displaygraph(ui, dag, displayer, parentnodes,
                 graphmod.asciiedges, renamefn, filematcher)
2207 2208
def checkunsupportedgraphflags(pats, opts):
    """Abort when an option incompatible with -G/--graph is enabled."""
    unsupported = ("newest_first",)
    for name in unsupported:
        if opts.get(name):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % name.replace("_", "-"))
2213 2214
def graphrevs(repo, nodes, opts):
    """Return a graphmod node walker over *nodes*, newest first.

    Reverses *nodes* in place and honors a --limit option if present.
    """
    cap = loglimit(opts)
    nodes.reverse()
    if cap is None:
        shown = nodes
    else:
        shown = nodes[:cap]
    return graphmod.nodes(repo, shown)
2220 2221
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule matching files (including in subrepos) for addition.

    Returns the list of file names that could not be added, including
    those reported bad by the matcher and those rejected by wctx.add.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # collect names the matcher rejects instead of just printing them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # audit for filename case collisions
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            # without --subrepos, recurse in 'explicitonly' mode so only
            # explicitly named files inside the subrepo are added
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2263 2264
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matching files without deleting them.

    Returns a pair (bad, forgot): names that could not be forgotten and
    names that actually were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # status indices: 0=modified, 1=added, 3=deleted, 6=clean
    # (same unpacking as in remove() below)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            # re-prefix subrepo results so they are repo-relative
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2311 2312
def files(ui, ctx, m, fm, fmt, subrepos):
    """Write the names of files in *ctx* matched by *m* via formatter *fm*.

    Returns 0 when at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working directory (rev is None), skip files marked removed
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        def matchessubrepo(subpath):
            # the subrepo itself, or a file under it, is matched
            return (m.always() or m.exact(subpath)
                    or any(f.startswith(subpath + '/') for f in m.files()))

        if subrepos or matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt, subrepos) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2344 2345
def remove(ui, repo, m, prefix, after, force, subrepos):
    """Schedule matching files (including in subrepos) for removal.

    'after' records removals already done on disk; 'force' removes even
    modified/added files. Returns 0 on success, 1 if any file had to be
    skipped with a warning.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # hoisted out of the subrepo loop: no loop-dependent state is captured
    def matchessubrepo(matcher, subpath):
        # the subrepo itself, or a file under it, is matched
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    for subpath in sorted(wctx.substate):
        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)

    # hoisted out of the file loop below, parameterized on the file name
    def insubrepo(f):
        for subpath in wctx.substate:
            if f.startswith(subpath):
                return True
        return False

    for f in m.files():
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo(f):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1

    # 'removelist' (renamed from 'list', which shadowed the builtin) is
    # the set of files actually scheduled for removal
    if force:
        removelist = modified + deleted + clean + added
    elif after:
        removelist = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        removelist = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(removelist):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in removelist:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(removelist)
    finally:
        wlock.release()

    return ret
2429 2430
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write the contents of files in *ctx* matched by *matcher*.

    Output goes through makefileobj (honoring --output templates) and
    is optionally decoded with --decode. Returns 0 if at least one file
    was written, 1 otherwise.
    """
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            # apply decode filters (e.g. EOL conversion) as for checkout
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        matcher.bad(path, msg)

    for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            # sub.cat returns its own error code; 0/falsy means success
            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2476 2477
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        # normalize the user-supplied date string before handing it on
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, matcher, "", opts) != 0:
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2493 2494
2494 2495 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2495 2496 # avoid cycle context -> subrepo -> cmdutil
2496 2497 import context
2497 2498
2498 2499 # amend will reuse the existing user if not specified, but the obsolete
2499 2500 # marker creation requires that the current user's name is specified.
2500 2501 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2501 2502 ui.username() # raise exception if username not set
2502 2503
2503 2504 ui.note(_('amending changeset %s\n') % old)
2504 2505 base = old.p1()
2505 2506 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2506 2507
2507 2508 wlock = lock = newid = None
2508 2509 try:
2509 2510 wlock = repo.wlock()
2510 2511 lock = repo.lock()
2511 2512 tr = repo.transaction('amend')
2512 2513 try:
2513 2514 # See if we got a message from -m or -l, if not, open the editor
2514 2515 # with the message of the changeset to amend
2515 2516 message = logmessage(ui, opts)
2516 2517 # ensure logfile does not conflict with later enforcement of the
2517 2518 # message. potential logfile content has been processed by
2518 2519 # `logmessage` anyway.
2519 2520 opts.pop('logfile')
2520 2521 # First, do a regular commit to record all changes in the working
2521 2522 # directory (if there are any)
2522 2523 ui.callhooks = False
2523 2524 activebookmark = repo._activebookmark
2524 2525 try:
2525 2526 repo._activebookmark = None
2526 2527 opts['message'] = 'temporary amend commit for %s' % old
2527 2528 node = commit(ui, repo, commitfunc, pats, opts)
2528 2529 finally:
2529 2530 repo._activebookmark = activebookmark
2530 2531 ui.callhooks = True
2531 2532 ctx = repo[node]
2532 2533
2533 2534 # Participating changesets:
2534 2535 #
2535 2536 # node/ctx o - new (intermediate) commit that contains changes
2536 2537 # | from working dir to go into amending commit
2537 2538 # | (or a workingctx if there were no changes)
2538 2539 # |
2539 2540 # old o - changeset to amend
2540 2541 # |
2541 2542 # base o - parent of amending changeset
2542 2543
2543 2544 # Update extra dict from amended commit (e.g. to preserve graft
2544 2545 # source)
2545 2546 extra.update(old.extra())
2546 2547
2547 2548 # Also update it from the intermediate commit or from the wctx
2548 2549 extra.update(ctx.extra())
2549 2550
2550 2551 if len(old.parents()) > 1:
2551 2552 # ctx.files() isn't reliable for merges, so fall back to the
2552 2553 # slower repo.status() method
2553 2554 files = set([fn for st in repo.status(base, old)[:3]
2554 2555 for fn in st])
2555 2556 else:
2556 2557 files = set(old.files())
2557 2558
2558 2559 # Second, we use either the commit we just did, or if there were no
2559 2560 # changes the parent of the working directory as the version of the
2560 2561 # files in the final amend commit
2561 2562 if node:
2562 2563 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2563 2564
2564 2565 user = ctx.user()
2565 2566 date = ctx.date()
2566 2567 # Recompute copies (avoid recording a -> b -> a)
2567 2568 copied = copies.pathcopies(base, ctx)
2568 2569 if old.p2:
2569 2570 copied.update(copies.pathcopies(old.p2(), ctx))
2570 2571
2571 2572 # Prune files which were reverted by the updates: if old
2572 2573 # introduced file X and our intermediate commit, node,
2573 2574 # renamed that file, then those two files are the same and
2574 2575 # we can discard X from our list of files. Likewise if X
2575 2576 # was deleted, it's no longer relevant
2576 2577 files.update(ctx.files())
2577 2578
2578 2579 def samefile(f):
2579 2580 if f in ctx.manifest():
2580 2581 a = ctx.filectx(f)
2581 2582 if f in base.manifest():
2582 2583 b = base.filectx(f)
2583 2584 return (not a.cmp(b)
2584 2585 and a.flags() == b.flags())
2585 2586 else:
2586 2587 return False
2587 2588 else:
2588 2589 return f not in base.manifest()
2589 2590 files = [f for f in files if not samefile(f)]
2590 2591
2591 2592 def filectxfn(repo, ctx_, path):
2592 2593 try:
2593 2594 fctx = ctx[path]
2594 2595 flags = fctx.flags()
2595 2596 mctx = context.memfilectx(repo,
2596 2597 fctx.path(), fctx.data(),
2597 2598 islink='l' in flags,
2598 2599 isexec='x' in flags,
2599 2600 copied=copied.get(path))
2600 2601 return mctx
2601 2602 except KeyError:
2602 2603 return None
2603 2604 else:
2604 2605 ui.note(_('copying changeset %s to %s\n') % (old, base))
2605 2606
2606 2607 # Use version of files as in the old cset
2607 2608 def filectxfn(repo, ctx_, path):
2608 2609 try:
2609 2610 return old.filectx(path)
2610 2611 except KeyError:
2611 2612 return None
2612 2613
2613 2614 user = opts.get('user') or old.user()
2614 2615 date = opts.get('date') or old.date()
2615 2616 editform = mergeeditform(old, 'commit.amend')
2616 2617 editor = getcommiteditor(editform=editform, **opts)
2617 2618 if not message:
2618 2619 editor = getcommiteditor(edit=True, editform=editform)
2619 2620 message = old.description()
2620 2621
2621 2622 pureextra = extra.copy()
2622 2623 if 'amend_source' in pureextra:
2623 2624 del pureextra['amend_source']
2624 2625 pureoldextra = old.extra()
2625 2626 if 'amend_source' in pureoldextra:
2626 2627 del pureoldextra['amend_source']
2627 2628 extra['amend_source'] = old.hex()
2628 2629
2629 2630 new = context.memctx(repo,
2630 2631 parents=[base.node(), old.p2().node()],
2631 2632 text=message,
2632 2633 files=files,
2633 2634 filectxfn=filectxfn,
2634 2635 user=user,
2635 2636 date=date,
2636 2637 extra=extra,
2637 2638 editor=editor)
2638 2639
2639 2640 newdesc = changelog.stripdesc(new.description())
2640 2641 if ((not node)
2641 2642 and newdesc == old.description()
2642 2643 and user == old.user()
2643 2644 and date == old.date()
2644 2645 and pureextra == pureoldextra):
2645 2646 # nothing changed. continuing here would create a new node
2646 2647 # anyway because of the amend_source noise.
2647 2648 #
2648 2649 # This not what we expect from amend.
2649 2650 return old.node()
2650 2651
2651 2652 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2652 2653 try:
2653 2654 if opts.get('secret'):
2654 2655 commitphase = 'secret'
2655 2656 else:
2656 2657 commitphase = old.phase()
2657 2658 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2658 2659 newid = repo.commitctx(new)
2659 2660 finally:
2660 2661 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2661 2662 if newid != old.node():
2662 2663 # Reroute the working copy parent to the new changeset
2663 2664 repo.setparents(newid, nullid)
2664 2665
2665 2666 # Move bookmarks from old parent to amend commit
2666 2667 bms = repo.nodebookmarks(old.node())
2667 2668 if bms:
2668 2669 marks = repo._bookmarks
2669 2670 for bm in bms:
2670 2671 ui.debug('moving bookmarks %r from %s to %s\n' %
2671 2672 (marks, old.hex(), hex(newid)))
2672 2673 marks[bm] = newid
2673 2674 marks.recordchange(tr)
2674 2675 #commit the whole amend process
2675 2676 if createmarkers:
2676 2677 # mark the new changeset as successor of the rewritten one
2677 2678 new = repo[newid]
2678 2679 obs = [(old, (new,))]
2679 2680 if node:
2680 2681 obs.append((ctx, ()))
2681 2682
2682 2683 obsolete.createmarkers(repo, obs)
2683 2684 tr.close()
2684 2685 finally:
2685 2686 tr.release()
2686 2687 if not createmarkers and newid != old.node():
2687 2688 # Strip the intermediate commit (if there was one) and the amended
2688 2689 # commit
2689 2690 if node:
2690 2691 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2691 2692 ui.note(_('stripping amended changeset %s\n') % old)
2692 2693 repair.strip(ui, repo, old.node(), topic='amend-backup')
2693 2694 finally:
2694 2695 lockmod.release(lock, wlock)
2695 2696 return newid
2696 2697
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description if it has one, else run the commit editor.

    When the editor is invoked, unchanged-message detection is enabled so
    that a template left untouched aborts the commit.
    """
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2702 2703
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Invoke the user's editor on a commit message for ctx and return it.

    The initial editor text comes from the most specific matching
    [committemplate] entry for ``editform`` (falling back through the
    dotted form components to 'changeset'), or from the built-in text when
    no template is configured.  Lines starting with 'HG:' are stripped
    from the result.

    Raises error.Abort when the edited message is empty, or when
    ``unchangedmessagedetection`` is set and the template was not touched.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # find the most specific [committemplate] entry: try
    # 'changeset.form.subform', then 'changeset.form', then 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        # make in-memory changes visible to external process
        tr = repo.currenttransaction()
        repo.dirstate.write(tr)
        pending = tr and tr.writepending() and repo.root

        editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                                  editform=editform, pending=pending)
        text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    finally:
        # restore the previous working directory even if the editor (or
        # writepending) raised; previously a failing editor left the
        # process chdir'ed into repo.root
        os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2743 2744
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the [committemplate] template *tmpl* for ctx and return it.

    A template SyntaxError is converted into error.Abort.  The rendered
    text is captured via the ui buffer rather than written out.
    """
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
    except SyntaxError as inst:
        raise error.Abort(inst.args[0])

    # make every other [committemplate] item available to the template
    # as a named sub-template
    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2763 2764
def hgprefix(msg):
    """Prefix every non-empty line of *msg* with 'HG: ', dropping blanks."""
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: " + line)
    return "\n".join(prefixed)
2766 2767
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ctx."""
    lines = []
    add = lines.append
    modified = ctx.modified()
    added = ctx.added()
    removed = ctx.removed()
    if ctx.description():
        add(ctx.description())
    add("")
    add("") # Empty line between message and comments.
    add(hgprefix(_("Enter commit message."
                   " Lines beginning with 'HG:' are removed.")))
    add(hgprefix(extramsg))
    add("HG: --")
    add(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        add(hgprefix(_("branch merge")))
    if ctx.branch():
        add(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        add(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        add(hgprefix(_("subrepo %s") % s))
    for f in added:
        add(hgprefix(_("added %s") % f))
    for f in modified:
        add(hgprefix(_("changed %s") % f))
    for f in removed:
        add(hgprefix(_("removed %s") % f))
    if not (added or modified or removed):
        add(hgprefix(_("no files changed")))
    add("")

    return "\n".join(lines)
2794 2795
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print informational messages after a commit has been created.

    Emits 'created new head' when the commit adds a branch head (see the
    decision table below), 'reopening closed branch head' when a closed
    head of the same branch is a parent, and echoes the new changeset in
    debug/verbose mode.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2842 2843
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Revert matched files in the working directory to their state in ctx.

    Classifies every matched file by comparing the working directory, the
    working directory parent(s) and the target revision, dispatches each
    file to an action (revert/add/remove/drop/forget/undelete/noop/unknown)
    via the table below, makes .orig backups as required, and finally hands
    the computed actions to _performrevert.  Matched subrepos are reverted
    recursively at the end.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2
    if node == parent:
        pmf = mf
    else:
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    wlock = repo.wlock()
    try:
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already collected or covered
                # by a collected directory / subrepo
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may needs backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            if pmf is None:
                # only need parent manifest in the merge case,
                # so do not read by default
                pmf = repo[parent].manifest()
            mergeadd = dsmodified - set(pmf)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backup = check = discard

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified,      actions['revert'],   discard),
            # Modified compared to target, but local file is deleted
            (deleted,       actions['revert'],   discard),
            # Modified compared to target, local change
            (dsmodified,    actions['revert'],   backup),
            # Added since target
            (added,         actions['remove'],   discard),
            # Added in working directory
            (dsadded,       actions['forget'],   discard),
            # Added since target, have local modification
            (modadded,      backupanddel,        backup),
            # Added since target but file is missing in working directory
            (deladded,      actions['drop'],     discard),
            # Removed since  target, before working copy parent
            (removed,       actions['add'],      discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk,      actions['add'],      check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved,     actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk,    actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean,         actions['noop'],     discard),
            # Existing file, not tracked anywhere
            (unknown,       actions['unknown'],  discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup and (backup <= dobackup
                                     or wctx[abs].cmp(ctx[abs])):
                        bakname = origpath(ui, repo, rel)
                        ui.note(_('saving current version of %s as %s\n') %
                                (rel, bakname))
                        if not opts.get('dry_run'):
                            # interactive revert keeps the original in place
                            # so hunks can still be applied against it
                            if interactive:
                                util.copyfile(target, bakname)
                            else:
                                util.rename(target, bakname)
                if ui.verbose or not exact:
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
    finally:
        wlock.release()
3122 3123
def origpath(ui, repo, filepath):
    """Return the path where the .orig backup for *filepath* is created.

    When [ui] origbackuppath is configured, backups go into that directory
    tree, mirroring the file's path relative to the repository root
    (creating directories as needed); otherwise the backup lives next to
    the original file.
    """
    backuproot = ui.config('ui', 'origbackuppath', None)
    if backuproot is None:
        return filepath + ".orig"

    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(backuproot, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath + ".orig"
3142 3143
3143 3144 def _revertprefetch(repo, ctx, *files):
3144 3145 """Let extension changing the storage layer prefetch content"""
3145 3146 pass
3146 3147
def _performrevert(repo, parents, ctx, actions, interactive=False):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    def checkout(f):
        # write the target revision's version of f into the working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # best effort: the file may already be gone
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, {})
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        # in "reversehunks" mode the diff is taken target->working and the
        # selected hunks reversed afterwards; otherwise working->target
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        # Apply changes
        fp = cStringIO.StringIO()
        for c in chunks:
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3248 3249
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            if synopsis:
                table[name] = func, list(options), synopsis
            else:
                table[name] = func, list(options)

            if norepo or optionalrepo or inferrepo:
                # Avoid import cycle.
                import commands
                aliases = ' %s' % ' '.join(parsealiases(name))
                if norepo:
                    commands.norepo += aliases
                if optionalrepo:
                    commands.optionalrepo += aliases
                if inferrepo:
                    commands.inferrepo += aliases

            return func
        return decorator

    return cmd
3304 3305
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl,   destbranch,   destpeer,   outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
# consumed by checkunfinished()/clearunfinished() below
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3333 3334
def checkunfinished(repo, commit=False):
    """Abort if a multistep operation (graft, interrupted update, ...) is in
    progress.

    With ``commit=True``, states whose entry allows committing are skipped.
    It's probably good to check this right before bailifchanged().
    """
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3344 3345
def clearunfinished(repo):
    """Abort on any non-clearable unfinished state, then remove the state
    files of all clearable ones."""
    # first pass: refuse to proceed if anything unclearable is in progress
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: drop the clearable state files
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
3355 3356
class dirstateguard(object):
    '''Restore dirstate at unexpected failure.

    At the construction, this class does:

    - write current ``repo.dirstate`` out, and
    - save ``.hg/dirstate`` into the backup file

    This restores ``.hg/dirstate`` from backup file, if ``release()``
    is invoked before ``close()``.

    This just removes the backup file at ``close()`` before ``release()``.
    '''

    def __init__(self, repo, name):
        self._repo = repo
        # id(self) keeps the backup filename unique per guard instance
        self._suffix = '.backup.%s.%d' % (name, id(self))
        repo.dirstate._savebackup(repo.currenttransaction(), self._suffix)
        self._active = True
        self._closed = False

    def __del__(self):
        if self._active: # still active
            # this may occur, even if this class is used correctly:
            # for example, releasing other resources like transaction
            # may raise exception before ``dirstateguard.release`` in
            # ``release(tr, ....)``.
            self._abort()

    def close(self):
        """Discard the backup: the guarded operation succeeded."""
        if not self._active: # already inactivated
            msg = (_("can't close already inactivated backup: dirstate%s")
                   % self._suffix)
            raise error.Abort(msg)

        self._repo.dirstate._clearbackup(self._repo.currenttransaction(),
                                         self._suffix)
        self._active = False
        self._closed = True

    def _abort(self):
        # roll ``.hg/dirstate`` back from the saved backup
        self._repo.dirstate._restorebackup(self._repo.currenttransaction(),
                                           self._suffix)
        self._active = False

    def release(self):
        """Finish the guard: restore from backup unless close() was called."""
        if not self._closed:
            if not self._active: # already inactivated
                msg = (_("can't release already inactivated backup:"
                         " dirstate%s")
                       % self._suffix)
                raise error.Abort(msg)
            self._abort()
@@ -1,1648 +1,1651
1 1 # stuff related specifically to patch manipulation / parsing
2 2 #
3 3 # Copyright 2008 Mark Edgington <edgimar@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # This code is based on the Mark Edgington's crecord extension.
9 9 # (Itself based on Bryan O'Sullivan's record extension.)
10 10
11 11 from __future__ import absolute_import
12 12
13 13 import cStringIO
14 14 import locale
15 15 import os
16 16 import re
17 17 import signal
18 18 import struct
19 19 import sys
20 20 import tempfile
21 21
22 22 from .i18n import _
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 patch as patchmod,
27 27 )
28 28
# This is required for ncurses to display non-ASCII characters in default user
# locale encoding correctly. --immerrr
locale.setlocale(locale.LC_ALL, '')

# os.name is one of: 'posix', 'nt', 'dos', 'os2', 'mac', or 'ce'
if os.name == 'posix':
    import curses
    import fcntl
    import termios
else:
    # I have no idea if wcurses works with crecord...
    try:
        import wcurses as curses
    except ImportError:
        # wcurses is not shipped on Windows by default
        pass

# bail out now (except on Windows, where the failure is tolerated for the
# moment) if neither curses nor wcurses could be imported above
try:
    curses
except NameError:
    if os.name != 'nt': # Temporary hack to get running on Windows again
        raise error.Abort(
            _('the python curses/wcurses module is not available/installed'))

# real stdout, saved before curses takes over the terminal; used by gethw()
_origstdout = sys.__stdout__ # used by gethw()
54 54
class patchnode(object):
    """abstract class for patch graph nodes
    (i.e. patchroot, header, hunk, hunkline)

    Subclasses form a tree: patch -> uiheader -> uihunk -> uihunkline.
    Navigation methods return a sibling/parent/child node or None.
    """

    def firstchild(self):
        raise NotImplementedError("method must be implemented by subclass")

    def lastchild(self):
        raise NotImplementedError("method must be implemented by subclass")

    def allchildren(self):
        "Return a list of all of the direct children of this node"
        raise NotImplementedError("method must be implemented by subclass")
    def nextsibling(self):
        """
        Return the closest next item of the same type where there are no items
        of different types between the current item and this closest item.
        If no such item exists, return None.

        """
        raise NotImplementedError("method must be implemented by subclass")

    def prevsibling(self):
        """
        Return the closest previous item of the same type where there are no
        items of different types between the current item and this closest item.
        If no such item exists, return None.

        """
        raise NotImplementedError("method must be implemented by subclass")

    def parentitem(self):
        # return the parent node, or None for a root (header) node
        raise NotImplementedError("method must be implemented by subclass")


    def nextitem(self, constrainlevel=True, skipfolded=True):
        """
        If constrainLevel == True, return the closest next item
        of the same type where there are no items of different types between
        the current item and this closest item.

        If constrainLevel == False, then try to return the next item
        closest to this item, regardless of item's type (header, hunk, or
        HunkLine).

        If skipFolded == True, and the current item is folded, then the child
        items that are hidden due to folding will be skipped when determining
        the next item.

        If it is not possible to get the next item, return None.

        """
        # not every node type has a 'folded' attribute (the patch root
        # doesn't), so treat a missing attribute as unfolded
        try:
            itemfolded = self.folded
        except AttributeError:
            itemfolded = False
        if constrainlevel:
            return self.nextsibling()
        elif skipfolded and itemfolded:
            # folded: jump over the hidden children to our next sibling,
            # falling back to the parent's next sibling
            nextitem = self.nextsibling()
            if nextitem is None:
                try:
                    nextitem = self.parentitem().nextsibling()
                except AttributeError:
                    nextitem = None
            return nextitem
        else:
            # depth-first order: child, then sibling, then ancestors' siblings
            # try child
            item = self.firstchild()
            if item is not None:
                return item

            # else try next sibling
            item = self.nextsibling()
            if item is not None:
                return item

            try:
                # else try parent's next sibling
                item = self.parentitem().nextsibling()
                if item is not None:
                    return item

                # else return grandparent's next sibling (or None)
                return self.parentitem().parentitem().nextsibling()

            except AttributeError: # parent and/or grandparent was None
                return None

    def previtem(self, constrainlevel=True, skipfolded=True):
        """
        If constrainLevel == True, return the closest previous item
        of the same type where there are no items of different types between
        the current item and this closest item.

        If constrainLevel == False, then try to return the previous item
        closest to this item, regardless of item's type (header, hunk, or
        HunkLine).

        If skipFolded == True, and the current item is folded, then the items
        that are hidden due to folding will be skipped when determining the
        next item.

        If it is not possible to get the previous item, return None.

        """
        if constrainlevel:
            return self.prevsibling()
        else:
            # reverse depth-first order: descend into the previous sibling's
            # deepest last descendant (unless folding hides it)
            # try previous sibling's last child's last child,
            # else try previous sibling's last child, else try previous sibling
            prevsibling = self.prevsibling()
            if prevsibling is not None:
                prevsiblinglastchild = prevsibling.lastchild()
                if ((prevsiblinglastchild is not None) and
                    not prevsibling.folded):
                    prevsiblinglclc = prevsiblinglastchild.lastchild()
                    if ((prevsiblinglclc is not None) and
                        not prevsiblinglastchild.folded):
                        return prevsiblinglclc
                    else:
                        return prevsiblinglastchild
                else:
                    return prevsibling

            # try parent (or None)
            return self.parentitem()
183 183
class patch(patchnode, list): # todo: rename patchroot
    """
    list of header objects representing the patch.

    """
    def __init__(self, headerlist):
        self.extend(headerlist)
        # give every header a back-reference to its owning patch object
        for hdr in self:
            hdr.patch = self
194 194
class uiheader(patchnode):
    """patch header

    Wraps a non-ui ``patch.header`` and tracks ui state (applied/partial/
    folded flags); unknown attribute reads fall through to the wrapped
    header via ``__getattr__``.

    xxx shouldn't we move this to mercurial/patch.py ?
    """

    def __init__(self, header):
        self.nonuiheader = header
        # flag to indicate whether to apply this chunk
        self.applied = True
        # flag which only affects the status display indicating if a node's
        # children are partially applied (i.e. some applied, some not).
        self.partial = False

        # flag to indicate whether to display as folded/unfolded to user
        self.folded = True

        # list of all headers in patch
        self.patch = None

        # flag is False if this header was ever unfolded from initial state
        self.neverunfolded = True
        # the read of self.hunks below falls through __getattr__ to the
        # wrapped header's hunk list; the assignment then rebinds
        # self.hunks on this instance to the ui wrappers
        self.hunks = [uihunk(h, self) for h in self.hunks]


    def prettystr(self):
        # render this header's textual form via pretty() into a string
        x = cStringIO.StringIO()
        self.pretty(x)
        return x.getvalue()

    def nextsibling(self):
        # the next header in the patch, or None if this is the last one
        numheadersinpatch = len(self.patch)
        indexofthisheader = self.patch.index(self)

        if indexofthisheader < numheadersinpatch - 1:
            nextheader = self.patch[indexofthisheader + 1]
            return nextheader
        else:
            return None

    def prevsibling(self):
        # the previous header in the patch, or None if this is the first one
        indexofthisheader = self.patch.index(self)
        if indexofthisheader > 0:
            previousheader = self.patch[indexofthisheader - 1]
            return previousheader
        else:
            return None

    def parentitem(self):
        """
        there is no 'real' parent item of a header that can be selected,
        so return None.
        """
        return None

    def firstchild(self):
        "return the first child of this item, if one exists. otherwise None."
        if len(self.hunks) > 0:
            return self.hunks[0]
        else:
            return None

    def lastchild(self):
        "return the last child of this item, if one exists. otherwise None."
        if len(self.hunks) > 0:
            return self.hunks[-1]
        else:
            return None

    def allchildren(self):
        "return a list of all of the direct children of this node"
        return self.hunks

    def __getattr__(self, name):
        # delegate unknown attributes to the wrapped non-ui header
        return getattr(self.nonuiheader, name)
270 270
class uihunkline(patchnode):
    "represents a changed line in a hunk"
    def __init__(self, linetext, hunk):
        self.linetext = linetext
        self.applied = True
        # the parent hunk to which this line belongs
        self.hunk = hunk
        # folding lines currently is not used/needed, but this flag is needed
        # in the previtem method.
        self.folded = False

    def prettystr(self):
        return self.linetext

    def nextsibling(self):
        # the line after this one within the same hunk, or None at the end
        siblings = self.hunk.changedlines
        myindex = siblings.index(self)
        if myindex < len(siblings) - 1:
            return siblings[myindex + 1]
        return None

    def prevsibling(self):
        # the line before this one within the same hunk, or None at the start
        siblings = self.hunk.changedlines
        myindex = siblings.index(self)
        if myindex > 0:
            return siblings[myindex - 1]
        return None

    def parentitem(self):
        "return the parent to the current item"
        return self.hunk

    def firstchild(self):
        "return the first child of this item, if one exists. otherwise None."
        # hunk-lines don't have children
        return None

    def lastchild(self):
        "return the last child of this item, if one exists. otherwise None."
        # hunk-lines don't have children
        return None
316 316
class uihunk(patchnode):
    """ui patch hunk, wraps a hunk and keep track of ui behavior """
    # number of context lines to show around changes (class-level constant)
    maxcontext = 3

    def __init__(self, hunk, header):
        self._hunk = hunk
        # wrap each raw diff line so the ui can toggle it individually
        self.changedlines = [uihunkline(line, self) for line in hunk.hunk]
        self.header = header
        # used at end for detecting how many removed lines were un-applied
        self.originalremoved = self.removed

        # flag to indicate whether to display as folded/unfolded to user
        self.folded = True
        # flag to indicate whether to apply this chunk
        self.applied = True
        # flag which only affects the status display indicating if a node's
        # children are partially applied (i.e. some applied, some not).
        self.partial = False

    def nextsibling(self):
        # the next hunk under the same header, or None if this is the last
        numhunksinheader = len(self.header.hunks)
        indexofthishunk = self.header.hunks.index(self)

        if (indexofthishunk < numhunksinheader - 1):
            nexthunk = self.header.hunks[indexofthishunk + 1]
            return nexthunk
        else:
            return None

    def prevsibling(self):
        # the previous hunk under the same header, or None if this is first
        indexofthishunk = self.header.hunks.index(self)
        if indexofthishunk > 0:
            previoushunk = self.header.hunks[indexofthishunk - 1]
            return previoushunk
        else:
            return None

    def parentitem(self):
        "return the parent to the current item"
        return self.header

    def firstchild(self):
        "return the first child of this item, if one exists. otherwise None."
        if len(self.changedlines) > 0:
            return self.changedlines[0]
        else:
            return None

    def lastchild(self):
        "return the last child of this item, if one exists. otherwise None."
        if len(self.changedlines) > 0:
            return self.changedlines[-1]
        else:
            return None

    def allchildren(self):
        "return a list of all of the direct children of this node"
        return self.changedlines
    def countchanges(self):
        """changedlines -> (n+,n-)"""
        # only lines still marked applied count toward the new hunk sizes
        add = len([l for l in self.changedlines if l.applied
                   and l.prettystr()[0] == '+'])
        rem = len([l for l in self.changedlines if l.applied
                   and l.prettystr()[0] == '-'])
        return add, rem

    def getfromtoline(self):
        """build the '@@ -a,b +c,d @@' line for the (possibly partially
        applied) hunk"""
        # calculate the number of removed lines converted to context lines
        removedconvertedtocontext = self.originalremoved - self.removed

        contextlen = (len(self.before) + len(self.after) +
                      removedconvertedtocontext)
        # the '\ no newline' marker is not a real context line
        if self.after and self.after[-1] == '\\ no newline at end of file\n':
            contextlen -= 1
        fromlen = contextlen + self.removed
        tolen = contextlen + self.added

        # diffutils manual, section "2.2.2.2 detailed description of unified
        # format": "an empty hunk is considered to end at the line that
        # precedes the hunk."
        #
        # so, if either of hunks is empty, decrease its line start. --immerrr
        # but only do this if fromline > 0, to avoid having, e.g fromline=-1.
        fromline, toline = self.fromline, self.toline
        if fromline != 0:
            if fromlen == 0:
                fromline -= 1
            if tolen == 0:
                toline -= 1

        # self.proc carries the optional function-context text after '@@'
        fromtoline = '@@ -%d,%d +%d,%d @@%s\n' % (
            fromline, fromlen, toline, tolen,
            self.proc and (' ' + self.proc))
        return fromtoline

    def write(self, fp):
        """write the hunk, as filtered by the user's selections, to fp"""
        # updated self.added/removed, which are used by getfromtoline()
        self.added, self.removed = self.countchanges()
        fp.write(self.getfromtoline())

        hunklinelist = []
        # add the following to the list: (1) all applied lines, and
        # (2) all unapplied removal lines (convert these to context lines)
        for changedline in self.changedlines:
            changedlinestr = changedline.prettystr()
            if changedline.applied:
                hunklinelist.append(changedlinestr)
            elif changedlinestr[0] == "-":
                hunklinelist.append(" " + changedlinestr[1:])

        fp.write(''.join(self.before + hunklinelist + self.after))

    # pretty-printing a hunk is the same as writing it out
    pretty = write

    def prettystr(self):
        # render the hunk via pretty()/write() into a string
        x = cStringIO.StringIO()
        self.pretty(x)
        return x.getvalue()

    def __getattr__(self, name):
        # delegate unknown attributes to the wrapped non-ui hunk
        return getattr(self._hunk, name)
    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
440 440
def filterpatch(ui, chunks, chunkselector, operation=None):
    """interactively filter patch chunks into applied-only chunks

    Returns a 2-tuple: the list of applied headers/hunks, and whatever
    extra information the chunkselector returned (e.g. ui options chosen
    during the interactive session).
    """

    if operation is None:
        operation = _('confirm')
    chunks = list(chunks)
    # convert chunks list into structure suitable for displaying/modifying
    # with curses. create a list of headers only.
    headers = [c for c in chunks if isinstance(c, patchmod.header)]

    # if there are no changed files, keep the return shape consistent with
    # the normal path so callers can always unpack two values
    if len(headers) == 0:
        return [], {}
    uiheaders = [uiheader(h) for h in headers]
    # let user choose headers/hunks/lines, and mark their applied flags
    # accordingly
    ret = chunkselector(ui, uiheaders)
    appliedhunklist = []
    for hdr in uiheaders:
        if (hdr.applied and
            (hdr.special() or len([h for h in hdr.hunks if h.applied]) > 0)):
            appliedhunklist.append(hdr)
            fixoffset = 0
            for hnk in hdr.hunks:
                if hnk.applied:
                    appliedhunklist.append(hnk)
                    # adjust the 'to'-line offset of the hunk to be correct
                    # after de-activating some of the other hunks for this file
                    if fixoffset:
                        #hnk = copy.copy(hnk) # necessary??
                        hnk.toline += fixoffset
                else:
                    fixoffset += hnk.removed - hnk.added

    return (appliedhunklist, ret)
476 476
def gethw():
    """
    magically get the current height and width of the window (without initscr)

    this is a rip-off of a rip-off - taken from the bpython code. it is
    useful / necessary because otherwise curses.initscr() must be called,
    which can leave the terminal in a nasty state after exiting.

    """
    # TIOCGWINSZ fills a struct winsize (4 unsigned shorts: rows, cols, ...)
    winsize = fcntl.ioctl(_origstdout, termios.TIOCGWINSZ, "\000"*8)
    h, w = struct.unpack("hhhh", winsize)[0:2]
    return h, w
489 489
def chunkselector(ui, headerlist):
    """
    curses interface to get selection of chunks, and mark the applied flags
    of the chosen chunks.

    returns the options (curseschunkselector.opts) gathered during the
    interactive session.
    """
    ui.write(_('starting interactive selection\n'))
    chunkselector = curseschunkselector(headerlist, ui)
    # remember the current SIGTSTP handler before curses runs
    f = signal.getsignal(signal.SIGTSTP)
    curses.wrapper(chunkselector.main)
    if chunkselector.initerr is not None:
        raise error.Abort(chunkselector.initerr)
    # ncurses does not restore signal handler for SIGTSTP
    signal.signal(signal.SIGTSTP, f)
    return chunkselector.opts
504 505
505 506 def testdecorator(testfn, f):
506 507 def u(*args, **kwargs):
507 508 return f(testfn, *args, **kwargs)
508 509 return u
509 510
def testchunkselector(testfn, ui, headerlist):
    """
    test interface to get selection of chunks, and mark the applied flags
    of the chosen chunks.

    replays one keypress per line read from the file named by testfn,
    stopping when handlekeypressed signals completion; returns the
    options gathered while replaying.
    """
    chunkselector = curseschunkselector(headerlist, ui)
    if testfn and os.path.exists(testfn):
        testf = open(testfn)
        testcommands = map(lambda x: x.rstrip('\n'), testf.readlines())
        testf.close()
        while True:
            if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
                break
    return chunkselector.opts
524 526
525 527 class curseschunkselector(object):
    def __init__(self, headerlist, ui):
        """set up ui state for an interactive chunk-selection session over
        headerlist (a list of uiheader objects)"""
        # put the headers into a patch object
        self.headerlist = patch(headerlist)

        self.ui = ui
        # options (extra information) gathered during the session, returned
        # to the caller via chunkselector()/testchunkselector()
        self.opts = {}

        self.errorstr = None
        # list of all chunks
        self.chunklist = []
        for h in headerlist:
            self.chunklist.append(h)
            self.chunklist.extend(h.hunks)

        # dictionary mapping (fgcolor, bgcolor) pairs to the
        # corresponding curses color-pair value.
        self.colorpairs = {}
        # maps custom nicknames of color-pairs to curses color-pair values
        self.colorpairnames = {}

        # the currently selected header, hunk, or hunk-line
        self.currentselecteditem = self.headerlist[0]

        # updated when printing out patch-display -- the 'lines' here are the
        # line positions *in the pad*, not on the screen.
        self.selecteditemstartline = 0
        self.selecteditemendline = None

        # define indentation levels
        self.headerindentnumchars = 0
        self.hunkindentnumchars = 3
        self.hunklineindentnumchars = 6

        # the first line of the pad to print to the screen
        self.firstlineofpadtoprint = 0

        # keeps track of the number of lines in the pad
        self.numpadlines = None

        # number of lines reserved for the status bar at the bottom
        self.numstatuslines = 2

        # keep a running count of the number of lines printed to the pad
        # (used for determining when the selected item begins/ends)
        self.linesprintedtopadsofar = 0

        # the first line of the pad which is visible on the screen
        # NOTE(review): duplicate of the assignment above -- harmless
        self.firstlineofpadtoprint = 0

        # stores optional text for a commit comment provided by the user
        self.commenttext = ""

        # if the last 'toggle all' command caused all changes to be applied
        self.waslasttoggleallapplied = True
578 581
579 582 def uparrowevent(self):
580 583 """
581 584 try to select the previous item to the current item that has the
582 585 most-indented level. for example, if a hunk is selected, try to select
583 586 the last hunkline of the hunk prior to the selected hunk. or, if
584 587 the first hunkline of a hunk is currently selected, then select the
585 588 hunk itself.
586 589
587 590 if the currently selected item is already at the top of the screen,
588 591 scroll the screen down to show the new-selected item.
589 592
590 593 """
591 594 currentitem = self.currentselecteditem
592 595
593 596 nextitem = currentitem.previtem(constrainlevel=False)
594 597
595 598 if nextitem is None:
596 599 # if no parent item (i.e. currentitem is the first header), then
597 600 # no change...
598 601 nextitem = currentitem
599 602
600 603 self.currentselecteditem = nextitem
601 604
602 605 def uparrowshiftevent(self):
603 606 """
604 607 select (if possible) the previous item on the same level as the
605 608 currently selected item. otherwise, select (if possible) the
606 609 parent-item of the currently selected item.
607 610
608 611 if the currently selected item is already at the top of the screen,
609 612 scroll the screen down to show the new-selected item.
610 613
611 614 """
612 615 currentitem = self.currentselecteditem
613 616 nextitem = currentitem.previtem()
614 617 # if there's no previous item on this level, try choosing the parent
615 618 if nextitem is None:
616 619 nextitem = currentitem.parentitem()
617 620 if nextitem is None:
618 621 # if no parent item (i.e. currentitem is the first header), then
619 622 # no change...
620 623 nextitem = currentitem
621 624
622 625 self.currentselecteditem = nextitem
623 626
624 627 def downarrowevent(self):
625 628 """
626 629 try to select the next item to the current item that has the
627 630 most-indented level. for example, if a hunk is selected, select
628 631 the first hunkline of the selected hunk. or, if the last hunkline of
629 632 a hunk is currently selected, then select the next hunk, if one exists,
630 633 or if not, the next header if one exists.
631 634
632 635 if the currently selected item is already at the bottom of the screen,
633 636 scroll the screen up to show the new-selected item.
634 637
635 638 """
636 639 #self.startprintline += 1 #debug
637 640 currentitem = self.currentselecteditem
638 641
639 642 nextitem = currentitem.nextitem(constrainlevel=False)
640 643 # if there's no next item, keep the selection as-is
641 644 if nextitem is None:
642 645 nextitem = currentitem
643 646
644 647 self.currentselecteditem = nextitem
645 648
646 649 def downarrowshiftevent(self):
647 650 """
648 651 if the cursor is already at the bottom chunk, scroll the screen up and
649 652 move the cursor-position to the subsequent chunk. otherwise, only move
650 653 the cursor position down one chunk.
651 654
652 655 """
653 656 # todo: update docstring
654 657
655 658 currentitem = self.currentselecteditem
656 659 nextitem = currentitem.nextitem()
657 660 # if there's no previous item on this level, try choosing the parent's
658 661 # nextitem.
659 662 if nextitem is None:
660 663 try:
661 664 nextitem = currentitem.parentitem().nextitem()
662 665 except AttributeError:
663 666 # parentitem returned None, so nextitem() can't be called
664 667 nextitem = None
665 668 if nextitem is None:
666 669 # if no next item on parent-level, then no change...
667 670 nextitem = currentitem
668 671
669 672 self.currentselecteditem = nextitem
670 673
671 674 def rightarrowevent(self):
672 675 """
673 676 select (if possible) the first of this item's child-items.
674 677
675 678 """
676 679 currentitem = self.currentselecteditem
677 680 nextitem = currentitem.firstchild()
678 681
679 682 # turn off folding if we want to show a child-item
680 683 if currentitem.folded:
681 684 self.togglefolded(currentitem)
682 685
683 686 if nextitem is None:
684 687 # if no next item on parent-level, then no change...
685 688 nextitem = currentitem
686 689
687 690 self.currentselecteditem = nextitem
688 691
689 692 def leftarrowevent(self):
690 693 """
691 694 if the current item can be folded (i.e. it is an unfolded header or
692 695 hunk), then fold it. otherwise try select (if possible) the parent
693 696 of this item.
694 697
695 698 """
696 699 currentitem = self.currentselecteditem
697 700
698 701 # try to fold the item
699 702 if not isinstance(currentitem, uihunkline):
700 703 if not currentitem.folded:
701 704 self.togglefolded(item=currentitem)
702 705 return
703 706
704 707 # if it can't be folded, try to select the parent item
705 708 nextitem = currentitem.parentitem()
706 709
707 710 if nextitem is None:
708 711 # if no item on parent-level, then no change...
709 712 nextitem = currentitem
710 713 if not nextitem.folded:
711 714 self.togglefolded(item=nextitem)
712 715
713 716 self.currentselecteditem = nextitem
714 717
715 718 def leftarrowshiftevent(self):
716 719 """
717 720 select the header of the current item (or fold current item if the
718 721 current item is already a header).
719 722
720 723 """
721 724 currentitem = self.currentselecteditem
722 725
723 726 if isinstance(currentitem, uiheader):
724 727 if not currentitem.folded:
725 728 self.togglefolded(item=currentitem)
726 729 return
727 730
728 731 # select the parent item recursively until we're at a header
729 732 while True:
730 733 nextitem = currentitem.parentitem()
731 734 if nextitem is None:
732 735 break
733 736 else:
734 737 currentitem = nextitem
735 738
736 739 self.currentselecteditem = currentitem
737 740
738 741 def updatescroll(self):
739 742 "scroll the screen to fully show the currently-selected"
740 743 selstart = self.selecteditemstartline
741 744 selend = self.selecteditemendline
742 745 #selnumlines = selend - selstart
743 746 padstart = self.firstlineofpadtoprint
744 747 padend = padstart + self.yscreensize - self.numstatuslines - 1
745 748 # 'buffered' pad start/end values which scroll with a certain
746 749 # top/bottom context margin
747 750 padstartbuffered = padstart + 3
748 751 padendbuffered = padend - 3
749 752
750 753 if selend > padendbuffered:
751 754 self.scrolllines(selend - padendbuffered)
752 755 elif selstart < padstartbuffered:
753 756 # negative values scroll in pgup direction
754 757 self.scrolllines(selstart - padstartbuffered)
755 758
756 759
757 760 def scrolllines(self, numlines):
758 761 "scroll the screen up (down) by numlines when numlines >0 (<0)."
759 762 self.firstlineofpadtoprint += numlines
760 763 if self.firstlineofpadtoprint < 0:
761 764 self.firstlineofpadtoprint = 0
762 765 if self.firstlineofpadtoprint > self.numpadlines - 1:
763 766 self.firstlineofpadtoprint = self.numpadlines - 1
764 767
    def toggleapply(self, item=None):
        """
        toggle the applied flag of the specified item. if no item is specified,
        toggle the flag of the currently selected item.

        The new state is propagated: toggling a header toggles all of its
        hunks and lines; toggling a hunk or line recomputes the
        applied/partial flags of its ancestors.
        """
        if item is None:
            item = self.currentselecteditem

        item.applied = not item.applied

        if isinstance(item, uiheader):
            item.partial = False
            if item.applied:
                # apply all its hunks
                for hnk in item.hunks:
                    hnk.applied = True
                    # apply all their hunklines
                    for hunkline in hnk.changedlines:
                        hunkline.applied = True
            else:
                # un-apply all its hunks
                for hnk in item.hunks:
                    hnk.applied = False
                    hnk.partial = False
                    # un-apply all their hunklines
                    for hunkline in hnk.changedlines:
                        hunkline.applied = False
        elif isinstance(item, uihunk):
            item.partial = False
            # apply all it's hunklines
            for hunkline in item.changedlines:
                hunkline.applied = item.applied

            # recompute the parent header's flags from its hunks' states
            siblingappliedstatus = [hnk.applied for hnk in item.header.hunks]
            allsiblingsapplied = not (False in siblingappliedstatus)
            nosiblingsapplied = not (True in siblingappliedstatus)

            siblingspartialstatus = [hnk.partial for hnk in item.header.hunks]
            somesiblingspartial = (True in siblingspartialstatus)

            #cases where applied or partial should be removed from header

            # if no 'sibling' hunks are applied (including this hunk)
            if nosiblingsapplied:
                if not item.header.special():
                    item.header.applied = False
                    item.header.partial = False
            else: # some/all parent siblings are applied
                item.header.applied = True
                item.header.partial = (somesiblingspartial or
                                        not allsiblingsapplied)

        elif isinstance(item, uihunkline):
            # recompute the parent hunk's flags from its lines' states
            siblingappliedstatus = [ln.applied for ln in item.hunk.changedlines]
            allsiblingsapplied = not (False in siblingappliedstatus)
            nosiblingsapplied = not (True in siblingappliedstatus)

            # if no 'sibling' lines are applied
            if nosiblingsapplied:
                item.hunk.applied = False
                item.hunk.partial = False
            elif allsiblingsapplied:
                item.hunk.applied = True
                item.hunk.partial = False
            else: # some siblings applied
                item.hunk.applied = True
                item.hunk.partial = True

            # then recompute the grandparent header's flags from its hunks
            parentsiblingsapplied = [hnk.applied for hnk
                                     in item.hunk.header.hunks]
            noparentsiblingsapplied = not (True in parentsiblingsapplied)
            allparentsiblingsapplied = not (False in parentsiblingsapplied)

            parentsiblingspartial = [hnk.partial for hnk
                                     in item.hunk.header.hunks]
            someparentsiblingspartial = (True in parentsiblingspartial)

            # if all parent hunks are not applied, un-apply header
            if noparentsiblingsapplied:
                if not item.hunk.header.special():
                    item.hunk.header.applied = False
                    item.hunk.header.partial = False
            # set the applied and partial status of the header if needed
            else: # some/all parent siblings are applied
                item.hunk.header.applied = True
                item.hunk.header.partial = (someparentsiblingspartial or
                                            not allparentsiblingsapplied)
853 856
854 857 def toggleall(self):
855 858 "toggle the applied flag of all items."
856 859 if self.waslasttoggleallapplied: # then unapply them this time
857 860 for item in self.headerlist:
858 861 if item.applied:
859 862 self.toggleapply(item)
860 863 else:
861 864 for item in self.headerlist:
862 865 if not item.applied:
863 866 self.toggleapply(item)
864 867 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
865 868
    def togglefolded(self, item=None, foldparent=False):
        "toggle folded flag of specified item (defaults to currently selected)"
        if item is None:
            item = self.currentselecteditem
        # the first unfold of a header also unfolds (reveals) its children;
        # foldparent redirects the toggle to the item's enclosing header
        if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
            if not isinstance(item, uiheader):
                # we need to select the parent item in this case
                self.currentselecteditem = item = item.parentitem()
            elif item.neverunfolded:
                item.neverunfolded = False

            # also fold any foldable children of the parent/current item
            if isinstance(item, uiheader): # the original or 'new' item
                for child in item.allchildren():
                    child.folded = not item.folded

        if isinstance(item, (uiheader, uihunk)):
            item.folded = not item.folded
884 887
885 888
886 889 def alignstring(self, instr, window):
887 890 """
888 891 add whitespace to the end of a string in order to make it fill
889 892 the screen in the x direction. the current cursor position is
890 893 taken into account when making this calculation. the string can span
891 894 multiple lines.
892 895
893 896 """
894 897 y, xstart = window.getyx()
895 898 width = self.xscreensize
896 899 # turn tabs into spaces
897 900 instr = instr.expandtabs(4)
898 901 strwidth = encoding.colwidth(instr)
899 902 numspaces = (width - ((strwidth + xstart) % width) - 1)
900 903 return instr + " " * numspaces + "\n"
901 904
    def printstring(self, window, text, fgcolor=None, bgcolor=None, pair=None,
        pairname=None, attrlist=None, towin=True, align=True, showwhtspc=False):
        """
        print the string, text, with the specified colors and attributes, to
        the specified curses window object.

        the foreground and background colors are of the form
        curses.color_xxxx, where xxxx is one of: [black, blue, cyan, green,
        magenta, red, white, yellow].  if pairname is provided, a color
        pair will be looked up in the self.colorpairnames dictionary.

        attrlist is a list containing text attributes in the form of
        curses.a_xxxx, where xxxx can be: [bold, dim, normal, standout,
        underline].

        if align == True, whitespace is added to the printed string such that
        the string stretches to the right border of the window.

        if showwhtspc == True, trailing whitespace of a string is highlighted.

        if towin == False nothing is drawn; the would-be output is only
        accumulated and returned (used for line counting).

        returns the full string that was (or would have been) printed.
        """
        # preprocess the text, converting tabs to spaces
        text = text.expandtabs(4)
        # strip \n, and convert control characters to ^[char] representation
        text = re.sub(r'[\x00-\x08\x0a-\x1f]',
                lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))

        # resolve the color pair: explicit pair > named pair > (fg, bg) lookup
        if pair is not None:
            colorpair = pair
        elif pairname is not None:
            colorpair = self.colorpairnames[pairname]
        else:
            if fgcolor is None:
                fgcolor = -1
            if bgcolor is None:
                bgcolor = -1
            if (fgcolor, bgcolor) in self.colorpairs:
                colorpair = self.colorpairs[(fgcolor, bgcolor)]
            else:
                colorpair = self.getcolorpair(fgcolor, bgcolor)
        # add attributes if possible
        if attrlist is None:
            attrlist = []
        if colorpair < 256:
            # then it is safe to apply all attributes
            for textattr in attrlist:
                colorpair |= textattr
        else:
            # just apply a select few (safe?) attributes
            for textattr in (curses.A_UNDERLINE, curses.A_BOLD):
                if textattr in attrlist:
                    colorpair |= textattr

        y, xstart = self.chunkpad.getyx()
        t = "" # variable for counting lines printed
        # if requested, show trailing whitespace
        if showwhtspc:
            origlen = len(text)
            text = text.rstrip(' \n') # tabs have already been expanded
            strippedlen = len(text)
            numtrailingspaces = origlen - strippedlen

        if towin:
            window.addstr(text, colorpair)
        t += text

        if showwhtspc:
            # highlight the stripped trailing whitespace in reverse video
            wscolorpair = colorpair | curses.A_REVERSE
            if towin:
                for i in range(numtrailingspaces):
                    window.addch(curses.ACS_CKBOARD, wscolorpair)
            t += " " * numtrailingspaces

        if align:
            if towin:
                extrawhitespace = self.alignstring("", window)
                window.addstr(extrawhitespace, colorpair)
            else:
                # need to use t, since the x position hasn't incremented
                extrawhitespace = self.alignstring(t, window)
                t += extrawhitespace

        # is reset to 0 at the beginning of printitem()
        linesprinted = (xstart + len(t)) / self.xscreensize
        self.linesprintedtopadsofar += linesprinted
        return t
989 992
    def updatescreen(self):
        """redraw the status window and the patch pad from scratch.

        if self.errorstr is set, only the error legend is shown and the
        patch is not redrawn (the caller clears errorstr on next keypress).
        """
        self.statuswin.erase()
        self.chunkpad.erase()

        printstring = self.printstring

        # print out the status lines at the top
        try:
            if self.errorstr is not None:
                printstring(self.statuswin, self.errorstr, pairname='legend')
                printstring(self.statuswin, 'Press any key to continue',
                            pairname='legend')
                self.statuswin.refresh()
                return
            printstring(self.statuswin,
                        "SELECT CHUNKS: (j/k/up/dn/pgup/pgdn) move cursor; "
                        "(space/A) toggle hunk/all; (e)dit hunk;",
                        pairname="legend")
            printstring(self.statuswin,
                        " (f)old/unfold; (c)onfirm applied; (q)uit; (?) help "
                        "| [X]=hunk applied **=folded",
                        pairname="legend")
        except curses.error:
            # drawing past the window edge raises; ignore like the rest of
            # the renderer does
            pass

        # print out the patch in the remaining part of the window
        try:
            self.printitem()
            self.updatescroll()
            self.chunkpad.refresh(self.firstlineofpadtoprint, 0,
                                  self.numstatuslines, 0,
                                  self.yscreensize + 1 - self.numstatuslines,
                                  self.xscreensize)
        except curses.error:
            pass

        # refresh([pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol])
        self.statuswin.refresh()
1028 1031
    def getstatusprefixstring(self, item):
        """
        create a string to prefix a line with which indicates whether 'item'
        is applied and/or folded.
        """
        # create checkbox string: [x] applied, [~] partially applied, [ ] not
        if item.applied:
            if not isinstance(item, uihunkline) and item.partial:
                checkbox = "[~]"
            else:
                checkbox = "[x]"
        else:
            checkbox = "[ ]"

        try:
            if item.folded:
                checkbox += "**"
                if isinstance(item, uiheader):
                    # one of "m", "a", or "d" (modified, added, deleted)
                    filestatus = item.changetype

                    checkbox += filestatus + "  "
            else:
                checkbox += "  "
                if isinstance(item, uiheader):
                    # add two more spaces for headers
                    checkbox += "  "
        # hunk-lines have no 'folded' attribute; they take the same padding
        except AttributeError: # not foldable
            checkbox += "  "

        return checkbox
1061 1064
    def printheader(self, header, selected=False, towin=True,
                    ignorefolding=False):
        """
        print the header to the pad.  if towin is False, don't print
        anything, but just return the string which would be printed
        (used for line counting).
        """
        outstr = ""
        text = header.prettystr()
        chunkindex = self.chunklist.index(header)

        if chunkindex != 0 and not header.folded:
            # add separating line before headers
            outstr += self.printstring(self.chunkpad, '_' * self.xscreensize,
                                       towin=towin, align=False)
        # select color-pair based on if the header is selected
        colorpair = self.getcolorpair(name=selected and "selected" or "normal",
                                      attrlist=[curses.A_BOLD])

        # print out each line of the chunk, expanding it to screen width

        # number of characters to indent lines on this level by
        indentnumchars = 0
        checkbox = self.getstatusprefixstring(header)
        if not header.folded or ignorefolding:
            textlist = text.split("\n")
            linestr = checkbox + textlist[0]
        else:
            # folded headers collapse down to just the filename
            linestr = checkbox + header.filename()
        outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
                                   towin=towin)
        if not header.folded or ignorefolding:
            if len(textlist) > 1:
                for line in textlist[1:]:
                    linestr = " "*(indentnumchars + len(checkbox)) + line
                    outstr += self.printstring(self.chunkpad, linestr,
                                               pair=colorpair, towin=towin)

        return outstr
1101 1104
    def printhunklinesbefore(self, hunk, selected=False, towin=True,
                             ignorefolding=False):
        """print the @@ from-to line and leading context lines of a hunk.

        includes start/end line indicator.  returns the printed string (the
        only output when towin is False).
        """
        outstr = ""
        # where hunk is in list of siblings
        hunkindex = hunk.header.hunks.index(hunk)

        if hunkindex != 0:
            # add separating line before headers
            outstr += self.printstring(self.chunkpad, ' '*self.xscreensize,
                                       towin=towin, align=False)

        colorpair = self.getcolorpair(name=selected and "selected" or "normal",
                                      attrlist=[curses.A_BOLD])

        # print out from-to line with checkbox
        checkbox = self.getstatusprefixstring(hunk)

        lineprefix = " "*self.hunkindentnumchars + checkbox
        frtoline = "   " + hunk.getfromtoline().strip("\n")

        outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
                                   align=False) # add uncolored checkbox/indent
        outstr += self.printstring(self.chunkpad, frtoline, pair=colorpair,
                                   towin=towin)

        if hunk.folded and not ignorefolding:
            # skip remainder of output
            return outstr

        # print out lines of the chunk preceding changed-lines
        for line in hunk.before:
            linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
            outstr += self.printstring(self.chunkpad, linestr, towin=towin)

        return outstr
1139 1142
1140 1143 def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
1141 1144 outstr = ""
1142 1145 if hunk.folded and not ignorefolding:
1143 1146 return outstr
1144 1147
1145 1148 # a bit superfluous, but to avoid hard-coding indent amount
1146 1149 checkbox = self.getstatusprefixstring(hunk)
1147 1150 for line in hunk.after:
1148 1151 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1149 1152 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1150 1153
1151 1154 return outstr
1152 1155
1153 1156 def printhunkchangedline(self, hunkline, selected=False, towin=True):
1154 1157 outstr = ""
1155 1158 checkbox = self.getstatusprefixstring(hunkline)
1156 1159
1157 1160 linestr = hunkline.prettystr().strip("\n")
1158 1161
1159 1162 # select color-pair based on whether line is an addition/removal
1160 1163 if selected:
1161 1164 colorpair = self.getcolorpair(name="selected")
1162 1165 elif linestr.startswith("+"):
1163 1166 colorpair = self.getcolorpair(name="addition")
1164 1167 elif linestr.startswith("-"):
1165 1168 colorpair = self.getcolorpair(name="deletion")
1166 1169 elif linestr.startswith("\\"):
1167 1170 colorpair = self.getcolorpair(name="normal")
1168 1171
1169 1172 lineprefix = " "*self.hunklineindentnumchars + checkbox
1170 1173 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1171 1174 align=False) # add uncolored checkbox/indent
1172 1175 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1173 1176 towin=towin, showwhtspc=True)
1174 1177 return outstr
1175 1178
    def printitem(self, item=None, ignorefolding=False, recursechildren=True,
                  towin=True):
        """
        use __printitem() to print the specified item.applied.
        if item is not specified, then print the entire patch.
        (hiding folded elements, etc. -- see __printitem() docstring)
        """
        if item is None:
            item = self.headerlist
        if recursechildren:
            # __printitem accumulates this counter as it renders
            self.linesprintedtopadsofar = 0

        outstr = []
        self.__printitem(item, ignorefolding, recursechildren, outstr,
                         towin=towin)
        return ''.join(outstr)
1192 1195
1193 1196 def outofdisplayedarea(self):
1194 1197 y, _ = self.chunkpad.getyx() # cursor location
1195 1198 # * 2 here works but an optimization would be the max number of
1196 1199 # consecutive non selectable lines
1197 1200 # i.e the max number of context line for any hunk in the patch
1198 1201 miny = min(0, self.firstlineofpadtoprint - self.yscreensize)
1199 1202 maxy = self.firstlineofpadtoprint + self.yscreensize * 2
1200 1203 return y < miny or y > maxy
1201 1204
1202 1205 def handleselection(self, item, recursechildren):
1203 1206 selected = (item is self.currentselecteditem)
1204 1207 if selected and recursechildren:
1205 1208 # assumes line numbering starting from line 0
1206 1209 self.selecteditemstartline = self.linesprintedtopadsofar
1207 1210 selecteditemlines = self.getnumlinesdisplayed(item,
1208 1211 recursechildren=False)
1209 1212 self.selecteditemendline = (self.selecteditemstartline +
1210 1213 selecteditemlines - 1)
1211 1214 return selected
1212 1215
    def __printitem(self, item, ignorefolding, recursechildren, outstr,
                    towin=True):
        """
        recursive method for printing out patch/header/hunk/hunk-line data to
        screen.  also returns a string with all of the content of the displayed
        patch (not including coloring, etc.).

        if ignorefolding is True, then folded items are printed out.

        if recursechildren is False, then only print the item without its
        child items.
        """
        # skip work once the cursor is well past the visible band
        if towin and self.outofdisplayedarea():
            return

        # side effect: records selected-item start/end lines on self
        selected = self.handleselection(item, recursechildren)

        # patch object is a list of headers
        if isinstance(item, patch):
            if recursechildren:
                for hdr in item:
                    self.__printitem(hdr, ignorefolding,
                                     recursechildren, outstr, towin)
        # todo: eliminate all isinstance() calls
        if isinstance(item, uiheader):
            outstr.append(self.printheader(item, selected, towin=towin,
                                           ignorefolding=ignorefolding))
            if recursechildren:
                for hnk in item.hunks:
                    self.__printitem(hnk, ignorefolding,
                                     recursechildren, outstr, towin)
        elif (isinstance(item, uihunk) and
              ((not item.header.folded) or ignorefolding)):
            # print the hunk data which comes before the changed-lines
            outstr.append(self.printhunklinesbefore(item, selected, towin=towin,
                                                    ignorefolding=ignorefolding))
            if recursechildren:
                for l in item.changedlines:
                    self.__printitem(l, ignorefolding,
                                     recursechildren, outstr, towin)
            outstr.append(self.printhunklinesafter(item, towin=towin,
                                                   ignorefolding=ignorefolding))
        elif (isinstance(item, uihunkline) and
              ((not item.hunk.folded) or ignorefolding)):
            outstr.append(self.printhunkchangedline(item, selected,
                                                    towin=towin))

        return outstr
1262 1265
1263 1266 def getnumlinesdisplayed(self, item=None, ignorefolding=False,
1264 1267 recursechildren=True):
1265 1268 """
1266 1269 return the number of lines which would be displayed if the item were
1267 1270 to be printed to the display. the item will not be printed to the
1268 1271 display (pad).
1269 1272 if no item is given, assume the entire patch.
1270 1273 if ignorefolding is True, folded items will be unfolded when counting
1271 1274 the number of lines.
1272 1275
1273 1276 """
1274 1277 # temporarily disable printing to windows by printstring
1275 1278 patchdisplaystring = self.printitem(item, ignorefolding,
1276 1279 recursechildren, towin=False)
1277 1280 numlines = len(patchdisplaystring) / self.xscreensize
1278 1281 return numlines
1279 1282
    def sigwinchhandler(self, n, frame):
        "handle window resizing"
        try:
            # leave/reenter curses so the terminal size is re-read
            curses.endwin()
            self.yscreensize, self.xscreensize = gethw()
            self.statuswin.resize(self.numstatuslines, self.xscreensize)
            # +1 accounts for the last line reaching the end of a row
            self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
            self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
            # todo: try to resize commit message window if possible
        except curses.error:
            # best-effort: a failed resize just keeps the old layout
            pass
1291 1294
    def getcolorpair(self, fgcolor=None, bgcolor=None, name=None,
                     attrlist=None):
        """
        get a curses color pair, adding it to self.colorpairs if it is not
        already defined.  an optional string, name, can be passed as a shortcut
        for referring to the color-pair.  by default, if no arguments are
        specified, the white foreground / black background color-pair is
        returned.

        it is expected that this function will be used exclusively for
        initializing color pairs, and not curses.init_pair().

        attrlist is used to 'flavor' the returned color-pair.  this information
        is not stored in self.colorpairs.  it contains attribute values like
        curses.A_BOLD.
        """
        if (name is not None) and name in self.colorpairnames:
            # then get the associated color pair and return it
            colorpair = self.colorpairnames[name]
        else:
            if fgcolor is None:
                fgcolor = -1
            if bgcolor is None:
                bgcolor = -1
            if (fgcolor, bgcolor) in self.colorpairs:
                colorpair = self.colorpairs[(fgcolor, bgcolor)]
            else:
                # allocate the next free curses pair index and cache it
                pairindex = len(self.colorpairs) + 1
                curses.init_pair(pairindex, fgcolor, bgcolor)
                colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
                    curses.color_pair(pairindex))
                if name is not None:
                    self.colorpairnames[name] = curses.color_pair(pairindex)

        # add attributes if possible (same policy as in printstring)
        if attrlist is None:
            attrlist = []
        if colorpair < 256:
            # then it is safe to apply all attributes
            for textattr in attrlist:
                colorpair |= textattr
        else:
            # just apply a select few (safe?) attributes
            for textattrib in (curses.A_UNDERLINE, curses.A_BOLD):
                if textattrib in attrlist:
                    colorpair |= textattrib
        return colorpair
1340 1343
    def initcolorpair(self, *args, **kwargs):
        "same as getcolorpair."
        # called only for its side effect of registering the (named) pair;
        # the computed pair value is deliberately discarded
        self.getcolorpair(*args, **kwargs)
1344 1347
1345 1348 def helpwindow(self):
1346 1349 "print a help window to the screen. exit after any keypress."
1347 1350 helptext = """ [press any key to return to the patch-display]
1348 1351
1349 1352 crecord allows you to interactively choose among the changes you have made,
1350 1353 and confirm only those changes you select for further processing by the command
1351 1354 you are running (commit/shelve/revert), after confirming the selected
1352 1355 changes, the unselected changes are still present in your working copy, so you
1353 1356 can use crecord multiple times to split large changes into smaller changesets.
1354 1357 the following are valid keystrokes:
1355 1358
1356 1359 [space] : (un-)select item ([~]/[x] = partly/fully applied)
1357 1360 a : (un-)select all items
1358 1361 up/down-arrow [k/j] : go to previous/next unfolded item
1359 1362 pgup/pgdn [K/J] : go to previous/next item of same type
1360 1363 right/left-arrow [l/h] : go to child item / parent item
1361 1364 shift-left-arrow [H] : go to parent header / fold selected header
1362 1365 f : fold / unfold item, hiding/revealing its children
1363 1366 F : fold / unfold parent item and all of its ancestors
1364 1367 m : edit / resume editing the commit message
1365 1368 e : edit the currently selected hunk
1366 1369 a : toggle amend mode (hg rev >= 2.2)
1367 1370 c : confirm selected changes
1368 1371 r : review/edit and confirm selected changes
1369 1372 q : quit without confirming (no changes will be made)
1370 1373 ? : help (what you're currently reading)"""
1371 1374
1372 1375 helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
1373 1376 helplines = helptext.split("\n")
1374 1377 helplines = helplines + [" "]*(
1375 1378 self.yscreensize - self.numstatuslines - len(helplines) - 1)
1376 1379 try:
1377 1380 for line in helplines:
1378 1381 self.printstring(helpwin, line, pairname="legend")
1379 1382 except curses.error:
1380 1383 pass
1381 1384 helpwin.refresh()
1382 1385 try:
1383 1386 helpwin.getkey()
1384 1387 except curses.error:
1385 1388 pass
1386 1389
    def confirmationwindow(self, windowtext):
        "display an informational window, then wait for and return a keypress."

        confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
        try:
            lines = windowtext.split("\n")
            for line in lines:
                self.printstring(confirmwin, line, pairname="selected")
        except curses.error:
            # text longer than the window: show what fits
            pass
        self.stdscr.refresh()
        confirmwin.refresh()
        try:
            # getch() may return a value outside chr()'s range (e.g. on
            # resize); treat that as "no response"
            response = chr(self.stdscr.getch())
        except ValueError:
            response = None

        return response
1405 1408
1406 1409 def confirmcommit(self, review=False):
1407 1410 """ask for 'y' to be pressed to confirm selected. return True if
1408 1411 confirmed."""
1409 1412 if review:
1410 1413 confirmtext = (
1411 1414 """if you answer yes to the following, the your currently chosen patch chunks
1412 1415 will be loaded into an editor. you may modify the patch from the editor, and
1413 1416 save the changes if you wish to change the patch. otherwise, you can just
1414 1417 close the editor without saving to accept the current patch as-is.
1415 1418
1416 1419 note: don't add/remove lines unless you also modify the range information.
1417 1420 failing to follow this rule will result in the commit aborting.
1418 1421
1419 1422 are you sure you want to review/edit and confirm the selected changes [yn]?
1420 1423 """)
1421 1424 else:
1422 1425 confirmtext = (
1423 1426 "are you sure you want to confirm the selected changes [yn]? ")
1424 1427
1425 1428 response = self.confirmationwindow(confirmtext)
1426 1429 if response is None:
1427 1430 response = "n"
1428 1431 if response.lower().startswith("y"):
1429 1432 return True
1430 1433 else:
1431 1434 return False
1432 1435
1433 1436 def recenterdisplayedarea(self):
1434 1437 """
1435 1438 once we scrolled with pg up pg down we can be pointing outside of the
1436 1439 display zone. we print the patch with towin=False to compute the
1437 1440 location of the selected item even though it is outside of the displayed
1438 1441 zone and then update the scroll.
1439 1442 """
1440 1443 self.printitem(towin=False)
1441 1444 self.updatescroll()
1442 1445
    def toggleedit(self, item=None, test=False):
        """
        edit the currently selected chunk in an external editor and splice
        the re-parsed hunks back into the header, shifting later hunks'
        line offsets.  'test' skips the curses UI refresh.
        """
        def updateui(self):
            # re-allocate the pad to fit the (possibly changed) patch size
            self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
            self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
            self.updatescroll()
            self.stdscr.refresh()
            self.statuswin.refresh()
            self.stdscr.keypad(1)

        def editpatchwitheditor(self, chunk):
            # returns the list of parsed headers from the edited patch, or
            # None when editing is impossible / was aborted
            if chunk is None:
                self.ui.write(_('cannot edit patch for whole file'))
                self.ui.write("\n")
                return None
            if chunk.header.binary():
                self.ui.write(_('cannot edit patch for binary file'))
                self.ui.write("\n")
                return None
            # patch comment based on the git one (based on comment at end of
            # https://mercurial-scm.org/wiki/recordextension)
            phelp = '---' + _("""
to remove '-' lines, make them ' ' lines (context).
to remove '+' lines, delete them.
lines starting with # will be removed from the patch.

if the patch applies cleanly, the edited hunk will immediately be
added to the record list. if it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. if
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
            (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                    suffix=".diff", text=True)
            ncpatchfp = None
            try:
                # write the initial patch
                f = os.fdopen(patchfd, "w")
                chunk.header.write(f)
                chunk.write(f)
                f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                f.close()
                # start the editor and wait for it to complete
                editor = self.ui.geteditor()
                ret = self.ui.system("%s \"%s\"" % (editor, patchfn),
                          environ={'hguser': self.ui.username()})
                if ret != 0:
                    self.errorstr = "Editor exited with status %d" % ret
                    return None
                # remove comment lines
                patchfp = open(patchfn)
                ncpatchfp = cStringIO.StringIO()
                for line in patchfp:
                    if not line.startswith('#'):
                        ncpatchfp.write(line)
                patchfp.close()
                ncpatchfp.seek(0)
                newpatches = patchmod.parsepatch(ncpatchfp)
            finally:
                os.unlink(patchfn)
                del ncpatchfp
            return newpatches
        if item is None:
            item = self.currentselecteditem
        if isinstance(item, uiheader):
            # whole-file headers are not editable
            return
        if isinstance(item, uihunkline):
            # normalize: edit the enclosing hunk, not a single line
            item = item.parentitem()
        if not isinstance(item, uihunk):
            return

        beforeadded, beforeremoved = item.added, item.removed
        newpatches = editpatchwitheditor(self, item)
        if newpatches is None:
            if not test:
                updateui(self)
            return
        header = item.header
        editedhunkindex = header.hunks.index(item)
        hunksbefore = header.hunks[:editedhunkindex]
        hunksafter = header.hunks[editedhunkindex + 1:]
        newpatchheader = newpatches[0]
        newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
        newadded = sum([h.added for h in newhunks])
        newremoved = sum([h.removed for h in newhunks])
        # net change in line count introduced by the edit; later hunks'
        # target line numbers must be shifted by this amount
        offset = (newadded - beforeadded) - (newremoved - beforeremoved)

        for h in hunksafter:
            h.toline += offset
        for h in newhunks:
            h.folded = False
        header.hunks = hunksbefore + newhunks + hunksafter
        if self.emptypatch():
            # edit removed every hunk: abort and restore the original hunk
            header.hunks = hunksbefore + [item] + hunksafter
        self.currentselecteditem = header

        if not test:
            updateui(self)
1543 1546
1544 1547 def emptypatch(self):
1545 1548 item = self.headerlist
1546 1549 if not item:
1547 1550 return True
1548 1551 for header in item:
1549 1552 if header.hunks:
1550 1553 return False
1551 1554 return True
1552 1555
1553 1556 def handlekeypressed(self, keypressed, test=False):
1554 1557 if keypressed in ["k", "KEY_UP"]:
1555 1558 self.uparrowevent()
1556 1559 if keypressed in ["K", "KEY_PPAGE"]:
1557 1560 self.uparrowshiftevent()
1558 1561 elif keypressed in ["j", "KEY_DOWN"]:
1559 1562 self.downarrowevent()
1560 1563 elif keypressed in ["J", "KEY_NPAGE"]:
1561 1564 self.downarrowshiftevent()
1562 1565 elif keypressed in ["l", "KEY_RIGHT"]:
1563 1566 self.rightarrowevent()
1564 1567 elif keypressed in ["h", "KEY_LEFT"]:
1565 1568 self.leftarrowevent()
1566 1569 elif keypressed in ["H", "KEY_SLEFT"]:
1567 1570 self.leftarrowshiftevent()
1568 1571 elif keypressed in ["q"]:
1569 1572 raise error.Abort(_('user quit'))
1570 1573 elif keypressed in ["c"]:
1571 1574 if self.confirmcommit():
1572 1575 return True
1573 1576 elif keypressed in ["r"]:
1574 1577 if self.confirmcommit(review=True):
1575 1578 return True
1576 1579 elif test and keypressed in ['X']:
1577 1580 return True
1578 1581 elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
1579 1582 self.toggleapply()
1580 1583 elif keypressed in ['A']:
1581 1584 self.toggleall()
1582 1585 elif keypressed in ['e']:
1583 1586 self.toggleedit(test=test)
1584 1587 elif keypressed in ["f"]:
1585 1588 self.togglefolded()
1586 1589 elif keypressed in ["F"]:
1587 1590 self.togglefolded(foldparent=True)
1588 1591 elif keypressed in ["?"]:
1589 1592 self.helpwindow()
1590 1593 self.stdscr.clear()
1591 1594 self.stdscr.refresh()
1592 1595
    def main(self, stdscr):
        """
        method to be wrapped by curses.wrapper() for selecting chunks.

        initializes colors/windows, then runs the key-event loop until
        handlekeypressed() signals completion.
        """
        signal.signal(signal.SIGWINCH, self.sigwinchhandler)
        self.stdscr = stdscr
        # error during initialization, cannot be printed in the curses
        # interface, it should be printed by the calling code
        self.initerr = None
        self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()

        curses.start_color()
        curses.use_default_colors()

        # available colors: black, blue, cyan, green, magenta, white, yellow
        # init_pair(color_id, foreground_color, background_color)
        self.initcolorpair(None, None, name="normal")
        self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_MAGENTA,
                           name="selected")
        self.initcolorpair(curses.COLOR_RED, None, name="deletion")
        self.initcolorpair(curses.COLOR_GREEN, None, name="addition")
        self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_BLUE, name="legend")
        # newwin([height, width,] begin_y, begin_x)
        self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
        self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences

        # figure out how much space to allocate for the chunk-pad which is
        # used for displaying the patch

        # stupid hack to prevent getnumlinesdisplayed from failing
        self.chunkpad = curses.newpad(1, self.xscreensize)

        # add 1 so to account for last line text reaching end of line
        self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1

        try:
            self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
        except curses.error:
            self.initerr = _('this diff is too large to be displayed')
            return
        # initialize selecteitemendline (initial start-line is 0)
        self.selecteditemendline = self.getnumlinesdisplayed(
            self.currentselecteditem, recursechildren=False)

        while True:
            self.updatescreen()
            try:
                keypressed = self.statuswin.getkey()
                if self.errorstr is not None:
                    # any keypress dismisses a pending error message
                    self.errorstr = None
                    continue
            except curses.error:
                # sentinel that matches no binding in handlekeypressed
                keypressed = "foobar"
            if self.handlekeypressed(keypressed):
                break
@@ -1,2557 +1,2557
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import collections
10 10 import cStringIO, email, os, errno, re, posixpath, copy
11 11 import tempfile, zlib, shutil
12 12
13 13 from i18n import _
14 14 from node import hex, short
15 15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 16 import pathutil
17 17
18 18 gitre = re.compile('diff --git a/(.*) b/(.*)')
19 19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
20 20
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
23 23
24 24
25 25 # public functions
26 26
def split(stream):
    '''return an iterator of individual patches from a stream

    The stream may contain a bare patch, a mailbox, a MIME message or
    text with rfc822-style headers; the first matching marker line
    decides which of the nested splitters below handles the rest.
    '''
    def isheader(line, inheader):
        # True if line looks like an rfc822 "Key: value" header (or a
        # continuation line of one when already inside a header block).
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # materialize a list of lines as a file-like object
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers (hg export output)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on "From " separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email package walk MIME parts; yield only text-ish parts
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split on the start of each rfc822-style header block
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no structure detected: the whole input is one plain patch
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object to the iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff lines until one of them identifies the container format
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
153 153
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# Consumed by extract() below: a "# <header> <value>" line in an
# hg-export patch header stores <value> under <data key> in the result.
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
160 160
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # the diff body is copied into a temp file returned as data['filename']
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        data['user'] = msg['From']
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            # strip a leading "[PATCH...]" tag and unfold wrapped subjects
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # text before the diff start is the commit message; hg
                # export headers embedded in it are parsed out below
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible headers, see patchheadermap above
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # conventional patch/mail separator: ignore the rest
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no diff found: nothing for the caller to apply or unlink
        os.unlink(tmpname)
    return data
280 280
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        # until told otherwise, assume a plain in-place modification
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # split an st_mode-style integer into the two bits we track
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        clone = patchmeta(self.path)
        for attr in ('oldpath', 'mode', 'op', 'binary'):
            setattr(clone, attr, getattr(self, attr))
        return clone

    def _ispatchinga(self, afile):
        # added files diff against /dev/null on the 'a' side
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # deleted files diff against /dev/null on the 'b' side
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        """True if this metadata matches the given a/ and b/ diff paths."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
326 326
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    lr is an iterable of patch lines; returns a list of patchmeta
    objects, one per 'diff --git' section seen before its hunks start.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new file section begins: flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # '---' marks the start of hunk data: metadata is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # last 6 chars are the octal mode, e.g. '100644'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
370 370
class linereader(object):
    """File-object wrapper allowing lines to be pushed back onto the stream."""

    def __init__(self, fp):
        self.fp = fp
        self.buf = []  # pushed-back lines, consumed first-in first-out

    def push(self, line):
        # pushing None is a harmless no-op
        if line is not None:
            self.buf.append(line)

    def readline(self):
        """Return the next line, preferring pushed-back lines."""
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # yield lines until EOF (readline returns a falsy empty string)
        line = self.readline()
        while line:
            yield line
            line = self.readline()
394 394
class abstractbackend(object):
    """Interface for the destinations a patch can be written to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for the target file.

        data is None when the file is missing or deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to the target file with the given (islink, isexec)
        mode.  A None data leaves the content unchanged.  copysource is
        the original file name when a copied file is modified afterwards.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Remove the target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Record rejected hunk lines for fname; discarded by default.

        failed counts the hunks that did not apply, total the hunks
        seen for this file.
        """
        pass

    def exists(self, fname):
        """Return whether the target file currently exists."""
        raise NotImplementedError
426 426
class fsbackend(abstractbackend):
    """Patch backend that reads and writes files under a base directory."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        # absolute path of f under the backend's base directory
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        """Return (data, (islink, isexec)); (None, None) if missing."""
        if self.opener.islink(fname):
            # symlink content is the link target itself
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only flags need updating
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # rejects are saved next to the target as <fname>.rej
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
478 478
class workingbackend(fsbackend):
    """fsbackend applying patches to the working directory of a repo,
    keeping the dirstate in sync (copies, removals, adds)."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity  # passed to scmutil.marktouched in close()
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files that exist but are not tracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush dirstate updates; return the sorted list of changed files."""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
522 522
class filestore(object):
    """Accumulate patched file contents, spilling to disk past a size cap.

    Small files are kept in an in-memory dict; once the running total
    would exceed ``maxsize`` bytes, further files are written under a
    temporary directory through an opener created on demand.
    """

    def __init__(self, maxsize=None):
        self.opener = None   # created lazily on first disk spill
        self.files = {}      # fname -> (tempname, mode, copied)
        self.created = 0     # counter used to name spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}       # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        fits = self.maxsize < 0 or (len(data) + self.size) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = scmutil.opener(root)
        # Avoid filename issues with these simple names
        tmpname = str(self.created)
        self.opener.write(tmpname, data)
        self.created += 1
        self.files[fname] = (tmpname, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied); (None, None, None) if unknown."""
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            tmpname, mode, copied = self.files[fname]
            return self.opener.read(tmpname), mode, copied
        return None, None, None

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
559 559
class repobackend(abstractbackend):
    """Backend reading from a changectx and writing into a filestore,
    tracking which files are changed, removed or copied."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx        # changectx providing the original contents
        self.store = store    # filestore receiving the patched contents
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)); (None, None) if missing."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: carry the original content forward
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched by the patch."""
        return self.changed | self.removed
601 601
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: the patterns rely on regex escapes (\d, \+, \*) that are
# invalid *string* escapes, which Python 3 deprecates in non-raw literals.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
606 606
class patchfile(object):
    """State of one file while its hunks are applied.

    Reads the target (or copy source) content via backend/store, applies
    hunks with offset and fuzz tracking, and writes the result plus any
    rejects back through the backend on close().
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp is a patchmeta describing the operation on this file
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # detected EOL style of the existing content
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}      # line content -> line numbers, built lazily in apply()
        self.dirty = 0
        self.offset = 0     # cumulative line-count drift from applied hunks
        self.skew = 0       # drift observed between expected and actual positions
        self.rej = []       # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines to fname, converting EOLs according to eolmode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Announce the file being patched, at most once."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h to the in-memory lines.

        Returns -1 when the hunk is rejected, 0 on a clean apply, or the
        fuzz level (> 0) when the hunk applied with fuzz.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content in one go
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
821 821
class header(object):
    """patch header

    Wraps the header lines of one file's diff and answers questions
    about them (file names, binary-ness, new-file-ness, ...).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                    break
            if line.startswith('---'):
                changed = sum(max(h.added, h.removed) for h in self.hunks)
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        for line in self.header:
            fp.write(line)

    def allhunks(self):
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        m = self.diffgit_re.match(self.header[0])
        if not m:
            # plain "diff -r" header: a single file name
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        return [fromfile] if fromfile == tofile else [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
893 893
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            # NOTE: trimming is deliberately disabled ('if False'); the
            # surplus-context computation is kept for when it returns.
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line[0] == '+')
        removed = sum(1 for line in hunk if line[0] == '-')
        return added, removed

    def write(self, fp):
        context = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            # the marker line is not counted in the hunk lengths
            context -= 1
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, context + self.removed,
                  self.toline, context + self.added,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
954 954
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks.

    Prompts the user (via *ui*) for each file header and each hunk.
    *operation* is the verb shown in prompts (defaults to 'record').

    Returns a 2-tuple ``(chunks, extra)`` where ``chunks`` is the list
    of selected headers/hunks and ``extra`` is a dict of additional
    information from the chunk selector (always empty for this text
    selector; other selectors may populate it).

    Raises error.Abort if the user quits.
    """
    if operation is None:
        operation = _('record')

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # an earlier 'all'/'done' or file-wide answer short-circuits the prompt
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end
                # of https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff",
                                                      text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        # a file can appear only once per patch; skip duplicate headers
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # e.g. binary files or file adds: take everything, no per-hunk
            # prompting possible
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    # earlier skipped hunks shifted the target line numbers
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: record the replacement hunks
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # only keep headers that are special (e.g. mode changes) or actually
    # carry selected hunks; the second element is selector extra info
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single parsed patch hunk, in unified or context diff format.

    ``a``/``b`` hold the old/new side lines, ``hunk`` the raw hunk text,
    and ``starta``/``lena``/``startb``/``lenb`` the declared ranges.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the '@@ ...' (or '*** ...') range line; num: 1-based hunk
        # number used in error messages; lr: linereader to parse from
        # (None builds an empty shell); context: True for context diffs
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from linereader *lr*."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # '@@ -N +M @@' omits lengths; they default to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk body from linereader *lr*.

        Reads the old-side ('*** N,M ****') then new-side ('--- N,M ----')
        blocks and rebuilds a unified-style representation in self.hunk.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the trailing newline we
            # attributed to the previous line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # merge the new-side line into self.hunk at the right spot,
            # skipping over removal lines that only exist on the old side
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True if both sides have as many lines as their ranges declare."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with *fuzz* context trimmed."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1327 1327
class binhunk(object):
    'A binary patch file.'

    def __init__(self, lr, fname):
        # fname is only used for error reporting
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload has been decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the patched content as a single-element list."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode a git 'literal'/'delta' base85+zlib hunk from *lr*."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count of this line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1383 1383
def parsefilename(str):
    """Extract the file name from a '--- ' / '+++ ' patch header line.

    The name ends at the first tab if present, otherwise at the first
    space; with neither, the whole (stripped) remainder is the name.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
1393 1393
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> fp = cStringIO.StringIO()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    import crecord as crecordmod
    reversed = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            # flip every +/- line and swap the add/remove counters
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        reversed.append(c)
    return reversed
1464 1464
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0      # current old-side line number
            self.toline = 0        # current new-side line number
            self.proc = ''         # trailing text on the @@ line
            self.header = None     # header currently being filled
            self.context = []      # pending context lines
            self.before = []       # context preceding the current hunk
            self.hunk = []         # +/- lines of the current hunk
            self.headers = []      # completed headers

        def addrange(self, limits):
            """Start a new hunk at the given @@ range."""
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            """Flush the pending hunk (if any) and remember new context."""
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
                self.proc = ''
            self.context = context

        def addhunk(self, hunk):
            """Begin a hunk body; prior context becomes leading context."""
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            """Close out the current file and open a new header."""
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # legal (state, event) -> action transitions; anything missing is
        # a malformed patch
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = cStringIO.StringIO()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1548 1548
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        # nothing to strip; only apply the prefix
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()
1586 1586
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) patch hunk.

    Decides which repository file the hunk targets and whether the
    operation is an add, a delete, or a plain modification, based on
    /dev/null markers, the hunk ranges, and which side exists in
    *backend*.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1641 1641
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                # not ours: put it back for the caller
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1693 1693
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable stream: buffer the whole remainder in memory
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the main parser re-reads the same data
    fp.seek(pos)
    return gitpatches
1719 1719
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown diff flavor, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit metadata-only entries (renames/copies without hunks)
            # that precede the file this diff line introduces
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush any remaining metadata-only git patches
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1818 1818
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        # length of the varint header: bytes are consumed while the
        # continuation (high) bit is set
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    # skip the two size headers (source size, result size)
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy-from-source opcode: low bits select which offset/size
            # bytes follow (little-endian, sparse encoding)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # zero size encodes the maximum copy length
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # literal insert opcode: next cmd bytes are appended verbatim
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1874 1874
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper: the real work happens in _applydiff, with the default
    # per-file patcher (patchfile)
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
1887 1887
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Drive iterhunks() events: apply hunks via *patcher*, route file
    metadata (add/delete/rename/copy/mode) to *backend* and *store*.

    Returns 0 on clean apply, 1 if there was fuzz, -1 if any hunk was
    rejected.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip-1 because iterhunks paths carry the a/ b/ prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file event failed (see below); skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: handle create/delete/copy/mode
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash originals of copy/rename sources before they change
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1975 1975
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Scans the external command's output for 'patching file', 'with fuzz',
    'saving rejects to file' and 'FAILED' markers, echoing them to the ui,
    and raises PatchError when the command exits non-zero.
    """
    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # initialize before the loop: a patcher may emit a 'with fuzz' or
        # 'FAILED' line before any 'patching file' line, which previously
        # raised UnboundLocalError on pf/printed_file
        pf = None
        printed_file = False
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             util.explainexit(code)[0])
    return fuzz
2017 2017
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj (a file name or open file object) through backend.

    Returns True when the patch applied but reported problems, False on
    a clean application; raises PatchError when hunks were rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    # patchobj may be a path or an already-open file object; open()
    # raises TypeError for the latter
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2044 2044
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch applied with a fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2051 2051
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> through a repobackend built from ctx and store."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2056 2056
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply the patch at <patchname> to the working directory.

    'eolmode' controls how line endings are handled:
    - 'strict': read inputs in binary mode, preserving EOLs
    - 'crlf': ignore EOLs while patching, rewrite them as CRLF
    - 'lf': ignore EOLs while patching, rewrite them as LF
    - None: take the mode from user settings, defaulting to 'strict'
    An external patcher program (ui.patch config) ignores 'eolmode'.

    Returns whether the patch applied with a fuzz factor.
    """
    if files is None:
        files = set()
    patcher = ui.config('ui', 'patch')
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2078 2078
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file names touched by the patch at patchpath
    (including rename sources)."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        touched = set()
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                continue
            if state != 'file':
                raise error.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pathtransform(gp.path, strip - 1, '')[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
            else:
                # no git metadata: synthesize it from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   '')
            touched.add(gp.path)
            if gp.op == 'RENAME':
                touched.add(gp.oldpath)
        return touched
    finally:
        fp.close()
2102 2102
class GitDiffRequired(Exception):
    """Raised (and caught in diff()) to restart diff generation in git
    extended format when the plain format would lose data."""
    pass
2105 2105
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    """Build diffopts with every feature group (git, whitespace,
    format-changing) enabled and parsed."""
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# historical alias kept for callers still using the old name
diffopts = diffallopts
2112 2112
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    """Build diffopts parsing only the opted-in feature groups.

    Feature groups:
    - git: git-style diffs
    - whitespace: whitespace options such as ignoreblanklines and ignorews
    - formatchanging: options likely to break or confuse most diff parsers
    """
    def fetch(key, name=None, getter=ui.configbool, forceplain=None):
        # an explicit command-line value wins; otherwise fall back to
        # config, except that forceplain (when given) takes over while
        # ui.plain() is in effect
        if opts:
            value = opts.get(key)
            if value:
                return value
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': fetch('nodates'),
        'showfunc': fetch('show_function', 'showfunc'),
        'context': fetch('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = fetch('git')
    if whitespace:
        for optkey, optname in (('ignore_all_space', 'ignorews'),
                                ('ignore_space_change', 'ignorewsamount'),
                                ('ignore_blank_lines', 'ignoreblanklines')):
            buildopts[optname] = fetch(optkey, optname)
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = fetch('nobinary')
        buildopts['noprefix'] = fetch('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
2153 2153
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache (max 20 entries) of filelogs, keyed by file name
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used filelog
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # full hashes in debug mode, short ones otherwise
    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # raising restarts the whole diff in git format below
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2255 2255
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()

    func must yield chunks of diff text; each line is matched against
    known header/hunk prefixes to pick a label, with dedicated labels
    for tabs and trailing whitespace on changed lines.
    '''
    # labels for lines inside a file header (before the first '@' hunk)
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for hunk body lines
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False  # True while inside a file header
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newlines removed by split()
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                # a line not starting with hunk-body characters opens a
                # new file header
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # split on tabs so they can be labeled separately
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: emit unlabeled
                yield (line, '')
            if line != stripline:
                # what rstrip() removed is the trailing whitespace
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2307 2307
def diffui(*args, **kw):
    """Like diff(), but yield (output, label) pairs ready for ui.write()."""
    return difflabel(diff, *args, **kw)
2311 2311
2312 2312 def _filepairs(ctx1, modified, added, removed, copy, opts):
2313 2313 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2314 2314 before and f2 is the the name after. For added files, f1 will be None,
2315 2315 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2316 2316 or 'rename' (the latter two only if opts.git is set).'''
2317 2317 gone = set()
2318 2318
2319 2319 copyto = dict([(v, k) for k, v in copy.items()])
2320 2320
2321 2321 addedset, removedset = set(added), set(removed)
2322 2322 # Fix up added, since merged-in additions appear as
2323 2323 # modifications during merges
2324 2324 for f in modified:
2325 2325 if f not in ctx1:
2326 2326 addedset.add(f)
2327 2327
2328 2328 for f in sorted(modified + added + removed):
2329 2329 copyop = None
2330 2330 f1, f2 = f, f
2331 2331 if f in addedset:
2332 2332 f1 = None
2333 2333 if f in copy:
2334 2334 if opts.git:
2335 2335 f1 = copy[f]
2336 2336 if f1 in removedset and f1 not in gone:
2337 2337 copyop = 'rename'
2338 2338 gone.add(f1)
2339 2339 else:
2340 2340 copyop = 'copy'
2341 2341 elif f in removedset:
2342 2342 f2 = None
2343 2343 if opts.git:
2344 2344 # have we already reported a copy above?
2345 2345 if (f in copyto and copyto[f] in addedset
2346 2346 and copy[copyto[f]] == f):
2347 2347 continue
2348 2348 yield f1, f2, copyop
2349 2349
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # blob id used in the git 'index' header line
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) 'diff -r ... -r ... file' header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # file flag -> git mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # developer sanity check: callers must pre-filter paths to relroot
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            # anything below cannot be represented in a plain diff;
            # let the caller decide whether to upgrade to git format
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        # strip relroot, prepend display prefix
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # a lone 'diff ...' header with no body is suppressed
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2460 2460
def diffstatsum(stats):
    """Aggregate (filename, adds, removes, isbinary) stat entries.

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the
    widest file name (display columns), the largest per-file change
    count, the grand totals, and whether any entry was binary.
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2471 2471
def diffstatdata(lines):
    """Parse diff output lines into per-file statistics.

    Returns a list of (filename, adds, removes, isbinary) tuples, one
    per file header ('diff --git' or 'diff -r') seen in lines.
    """
    # raw string: '\s' in a plain literal only works because it is not a
    # recognized string escape; make the regex explicit
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats of the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2501 2501
def diffstat(lines, width=80, git=False):
    """Render diff output lines as a diffstat listing, one row per file
    plus a trailing summary line."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # 'Bin' needs at least three columns
        countwidth = max(countwidth, 3)
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, padding, countwidth, count,
                       '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2539 2539
def diffstatui(*args, **kw):
    """Like diffstat(), but yield (output, label) pairs ready for
    ui.write()."""
    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # per-file row: label the +/- graph segments separately
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now