##// END OF EJS Templates
templater: add simple interface for unnamed template (API)...
Yuya Nishihara -
r32873:2ecce24d default
parent child Browse files
Show More
@@ -1,3591 +1,3591 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import itertools
12 12 import os
13 13 import re
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 encoding,
30 30 error,
31 31 formatter,
32 32 graphmod,
33 33 lock as lockmod,
34 34 match as matchmod,
35 35 obsolete,
36 36 patch,
37 37 pathutil,
38 38 phases,
39 39 pycompat,
40 40 registrar,
41 41 repair,
42 42 revlog,
43 43 revset,
44 44 scmutil,
45 45 smartset,
46 46 templatekw,
47 47 templater,
48 48 util,
49 49 vfs as vfsmod,
50 50 )
# convenience alias: in-memory file object used when buffering patch text
stringio = util.stringio

# templates of common command options
# (each entry: (shortflag, longflag, default, help[, value placeholder]))

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
def ishunk(x):
    """Return True if *x* is a hunk object (curses UI hunk or record hunk)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
167 167
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose hunks introduce a new file and
    were modified during interactive selection (i.e. are not present in
    *originalchunks*)."""
    return set(c.header.filename()
               for c in chunks
               if ishunk(c) and c.header.isnewfile()
               and c not in originalchunks)
175 175
def parsealiases(cmd):
    """Split a command-table key into its list of aliases.

    The leading '^' marker (which flags a command as "important" in help
    output) is stripped from the primary name.
    """
    aliases = cmd.split("|")
    aliases[0] = aliases[0].lstrip("^")
    return aliases
178 178
def setupwrapcolorwrite(ui):
    """Replace ui.write with a version that labels/colorizes diff output.

    Each write is routed through patch.difflabel() so that diff chunks get
    the appropriate label appended.  Returns the original ui.write so the
    caller can restore it afterwards.
    """
    origwrite = ui.write

    def colorwrite(*args, **kwargs):
        label = kwargs.pop('label', '')
        for chunk, chunklabel in patch.difflabel(lambda: args):
            origwrite(chunk, label=label + chunklabel)

    setattr(ui, 'write', colorwrite)
    return origwrite
191 191
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user filter *originalhunks*, via the curses UI when enabled,
    otherwise via the plain text prompts of patch.filterpatch."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # test mode: drive the chunk selector from a scripted input file
        chunkselector = crecordmod.testdecorator(testfile,
                                                 crecordmod.testchunkselector)
    else:
        chunkselector = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, chunkselector, operation)
204 204
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to select a subset of *originalhunks*.

    *operation* names the kind of filtering being performed (reverting,
    committing, shelving, ...) so that prompts can mention it (see
    patch.filterpatch).  Returns (selected chunks, extra options).
    """
    withcurses = crecordmod.checkcurses(ui)
    scriptfile = ui.config('experimental', 'crecordtest', None)
    # temporarily colorize diff output while prompting
    origwrite = setupwrapcolorwrite(ui)
    try:
        chunks, newopts = filterchunks(ui, originalhunks, withcurses,
                                       scriptfile, operation)
    finally:
        ui.write = origwrite
    return chunks, newopts
221 221
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them through *commitfunc*.

    Aborts when the terminal is not interactive (suggesting *cmdsuggest*
    when given).  *filterfn* is called to let the user pick hunks;
    *backupall* forces backing up every changed file instead of only the
    ones touched by the selection.  The actual work runs under the repo
    wlock via the 'commit' driver at the bottom.
    """
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            # collect explicitly-matched directories and abort on bad files
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # diff options tuned for a machine-reapplicable patch: git format,
        # no dates, with function context for nicer hunk headers
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # some chunk objects (headers) have no files(); skip them
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # build the patch text containing only the selected hunks of
            # backed-up files
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"},
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # remove newly-added-and-modified files so the revert below
            # restores a clean base to apply the patch onto
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best effort: leftover backups are harmless
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # serialize working-directory mutation under the wlock
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
404 404
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        candidates = [cmd]
    else:
        candidates = table.keys()

    choice = {}
    debugchoice = {}
    allcmds = []
    for entry in candidates:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        matched = None
        if cmd in aliases:
            matched = cmd
        elif not strict:
            # prefix matching: first alias starting with cmd wins
            for alias in aliases:
                if alias.startswith(cmd):
                    matched = alias
                    break
        if matched is None:
            continue
        if aliases[0].startswith("debug") or matched.startswith("debug"):
            debugchoice[matched] = (aliases, table[entry])
        else:
            choice[matched] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
442 442
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    # several prefix matches: ambiguous
    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    # exactly one prefix match
    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
458 458
def findrepo(p):
    """Walk upwards from *p* looking for a directory containing '.hg'.

    Returns the repository root, or None when the filesystem root is
    reached without finding one.
    """
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # reached the filesystem root
            return None
        p = parent
466 466
def bailifchanged(repo, merge=True, hint=None):
    """Enforce the precondition that the working directory must be clean.

    'merge' can be set to False if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to the Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # modified, added, removed, deleted
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
484 484
def logmessage(ui, opts):
    """Get the commit message according to the -m and -l options."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if message or not logfile:
        return message
    # only --logfile given: read the message from the file (or stdin)
    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(_("can't read commit message '%s': %s") %
                          (logfile, inst.strerror))
503 503
def mergeeditform(ctxorbool, baseformname):
    """Return the appropriate editform name (referencing a committemplate).

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether a merge is being committed.

    Returns baseformname with '.merge' appended if it is a merge,
    otherwise with '.normal' appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
520 520
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """Get the appropriate commit message editor according to '--edit'.

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing, but before
    the empty-ness check.  It should return the actual text to be stored
    into history, allowing the description to be changed before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line.  The 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names, distinguishing the
    purpose of the commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of 'edit'
    if one of 'finishdesc' or 'extramsg' is specified, because they are
    specific to usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forceeditor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forceeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
551 551
def loglimit(opts):
    """Get the log limit according to option -l/--limit."""
    limit = opts.get('limit')
    if not limit:
        # option absent or empty: no limit
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
565 565
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in output filename pattern *pat*.

    Supported escapes (enabled only when the corresponding argument is
    given): %% literal '%', %b repo basename, %H/%h/%R/%r/%m node-derived
    values, %N/%n total/sequence number, %s/%d/%p basename/dirname/full
    pathname.  Raises error.Abort on an escape that is not enabled.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }
    try:
        # the two node-conditional registrations are merged into one block;
        # lambdas are lazy, so repo is only touched if the escape is used
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                # raw string: '[^\w]' is an invalid escape sequence and
                # warns on modern Python
                'm': lambda: re.sub(r'[^\w]', '_', str(desc)),
                'r': lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0),
                })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # zero-pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                i += 1
                c = pat[i:i + 1]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
611 611
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout."""
    if not pat:
        # empty/absent pattern means the standard stream
        return True
    return pat == '-'
615 615
616 616 class _unclosablefile(object):
617 617 def __init__(self, fp):
618 618 self._fp = fp
619 619
620 620 def close(self):
621 621 pass
622 622
623 623 def __iter__(self):
624 624 return iter(self._fp)
625 625
626 626 def __getattr__(self, attr):
627 627 return getattr(self._fp, attr)
628 628
629 629 def __enter__(self):
630 630 return self
631 631
632 632 def __exit__(self, exc_type, exc_value, exc_tb):
633 633 pass
634 634
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the output file described by pattern *pat* in the given mode.

    A stdio-like pattern ('' or '-') yields an unclosable wrapper around
    ui.fout/ui.fin.  *modemap*, when given, can override the mode per
    filename; a first 'wb' open is downgraded to 'ab' for later opens of
    the same file.
    """
    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
653 653
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    *opts* must carry the 'changelog', 'manifest' and 'dir' keys (see
    debugrevlogopts).  Raises error.Abort on conflicting or incomplete
    option combinations, error.CommandError when no target can be
    determined.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    # validate the option combination before touching anything
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                # empty filelogs are treated as nonexistent (fall through)
                r = filelog
    if not r:
        # no repo (or empty log): open the revlog file directly from disk
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
698 698
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching *pats* to the last
    element of *pats*, honoring --after/--force/--dry-run in *opts*.

    Returns True when at least one copy failed (shell-style error flag).
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about unmanaged/removed files that were named exactly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/move; returns a truthy value only on failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename through a temporary name so the case change
                    # takes effect even on case-insensitive filesystems
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # score a candidate strip length by how many stripped
                    # source paths already exist under dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
944 944
## facility to let extensions process additional data into an import patch
# lists of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' functions are run before the commit is made and are provided
# the following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided
# the following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
965 965
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
        (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
        updatefunc(<repo>, <node>)

    Returns a (summary message, committed node, rejects flag) tuple; the
    node is None when nothing was committed.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # pull everything we need out of the patch header
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    # explicit command line options win over patch header data
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    # no filename means patch.extract() found nothing to apply
    if not tmpname:
        return (None, None, False)

    rejects = False

    try:
        # pick the commit message: command line beats patch header; an empty
        # message (None) will later trigger the commit editor
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        # normalize to two parents, then decide which pair to commit onto
        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply the patch to the working directory
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                # with --partial, hunk rejects are tolerated and reported
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                extra = {}
                # let extensions contribute to the changeset's extra dict
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    # a fully-rejected patch still needs a (empty) commit
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            # --bypass: build the commit in memory without touching the
            # working directory
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.memctx(repo, (p1.node(), p2.node()),
                                        message,
                                        files=files,
                                        filectxfn=store,
                                        user=user,
                                        date=date,
                                        branch=branch,
                                        editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        # the extracted patch always lives in a temporary file; clean it up
        os.unlink(tmpname)
1135 1135
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to the actual export function
# each function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1143 1143
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    """Emit one changeset as an "HG changeset patch" through write().

    The header lines are written first, then any extension-provided header
    lines, the description, and finally the (labeled) diff chunks.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    if switch_parent:
        parents.reverse()
    prev = parents[0] if parents else nullid

    branch = ctx.branch()
    headerlines = ["# HG changeset patch\n",
                   "# User %s\n" % ctx.user(),
                   "# Date %d %d\n" % ctx.date(),
                   "# %s\n" % util.datestr(ctx.date())]
    if branch and branch != 'default':
        headerlines.append("# Branch %s\n" % branch)
    headerlines.append("# Node ID %s\n" % hex(node))
    headerlines.append("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        headerlines.append("# Parent %s\n" % hex(parents[1]))
    for line in headerlines:
        write(line)

    # extensions may contribute extra header lines
    for headerid in extraexport:
        extraline = extraexportmap[headerid](seqno, ctx)
        if extraline is not None:
            write('# %s\n' % extraline)
    write(ctx.description().rstrip())
    write("\n\n")

    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1176 1176
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()
    '''
    if not revs:
        # nothing to export; without this guard, max() below would raise a
        # confusing "max() arg is an empty sequence" ValueError
        return

    total = len(revs)
    # width of the widest revision number, used to pad %r in fntemplate
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    # decide once where output goes when no per-rev file is created
    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
                             total=total, seqno=seqno, revwidth=revwidth,
                             mode='wb', modemap=filemode)
            dest = fo.name
            # rebind write for this file; the closure captures the current
            # fo and is itself redefined on the next iteration
            def write(s, **kw):
                fo.write(s)
            if not dest.startswith('<'):
                repo.ui.note("%s\n" % dest)
        _exportsingle(
            repo, ctx, match, switch_parent, rev, seqno, write, opts)
        if fo is not None:
            fo.close()
1235 1235
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes to fp when given, otherwise to ui.write. With stat=True a
    diffstat is rendered instead of the full diff; with listsubrepos=True
    the diff of each subrepo present in the two contexts is appended.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # fp has no label support; drop the label keyword
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs no context lines; strip them for speed
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1292 1292
1293 1293 def _changesetlabels(ctx):
1294 1294 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1295 1295 if ctx.obsolete():
1296 1296 labels.append('changeset.obsolete')
1297 1297 if ctx.troubled():
1298 1298 labels.append('changeset.troubled')
1299 1299 for trouble in ctx.troubles():
1300 1300 labels.append('trouble.%s' % trouble)
1301 1301 return ' '.join(labels)
1302 1302
class changeset_printer(object):
    '''show changeset information when templating not requested.

    Output may be buffered per revision (buffered=True) and later emitted
    by flush(), which is how graph log interleaves changeset text with
    graph edges.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-rev buffered output: rev -> rendered header / body text
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        '''write buffered output for ctx; return 1 if a hunk was written'''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # only emit a header when it differs from the previous one
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the accumulated footer, if any
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        '''render ctx, either buffering the output or writing it directly'''
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(ctx), hexfunc(scmutil.binnode(ctx)))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset:   %d:%s\n") % revnode,
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase:       %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent:      %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest:    %d:%s\n") %
                          (self.repo.manifestlog._revlog.rev(mnode),
                           hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if ctx.troubled():
            # i18n: column positioning for "hg log"
            self.ui.write(_("trouble:     %s\n") % ', '.join(ctx.troubles()),
                          label='log.trouble')

        if self.ui.debugflag:
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, util.escapestr(value)),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        '''emit the diffstat and/or diff for ctx when requested via opts'''
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1478 1478
class jsonchangeset(changeset_printer):
    '''format changeset information as a JSON array of objects.

    JSON syntax ("[", "," separators, "]") is emitted by hand; _first
    tracks whether the opening bracket has been written yet.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        self._first = True

    def close(self):
        # close the array; emit "[]" if no changeset was ever shown
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory has no rev/node; render JSON null for both
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = '%d' % rev
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n  "rev": %s') % jrev)
            self.ui.write((',\n  "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n  "rev": %s') % jrev)
        self.ui.write((',\n  "node": %s') % jnode)
        self.ui.write((',\n  "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n  "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n  "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n  "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n  "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n  "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n  "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n  "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n  "manifest": %s') % jmanifestnode)

            self.ui.write((',\n  "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write((',\n  "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n  "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n  "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n  "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n  "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # render the diffstat into a buffer so it can be escaped
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n  "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n  "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1577 1577
class changeset_templater(changeset_printer):
    '''format changeset information using a template.

    The template parts in use (header, footer, the main part named by the
    spec's ref, docheader, docfooter) are resolved once at construction
    time, honoring verbose/quiet/debug mode-specific variants.
    '''

    def __init__(self, ui, repo, tmplspec, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.t = formatter.loadtemplater(ui, tmplspec,
                                         cache=templatekw.defaulttempl)
        self._counter = itertools.count()
        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        self._tref = tmplspec.ref
        self._parts = {'header': '', 'footer': '',
                       tmplspec.ref: tmplspec.ref,
                       'docheader': '', 'docfooter': ''}
        # later (mode-specific) entries override the plain ones
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['index'] = next(self._counter)
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache
        props = pycompat.strkwargs(props)

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                # repeated identical headers are collapsed to a single one
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts[self._tref]
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1650 1650
def logtemplatespec(tmpl, mapfile):
    """Build a 'changeset' templatespec from a literal template or map file"""
    return formatter.templatespec('changeset', tmpl, mapfile)
1653 1653
def _lookuplogtemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    See formatter.lookuptemplate() for details.
    """
    # neither --template nor --style given: fall back to [ui] configuration;
    # a configured template takes precedence over a configured style
    if not tmpl and not style:
        cfgtmpl = ui.config('ui', 'logtemplate')
        if cfgtmpl:
            return logtemplatespec(templater.unquotestring(cfgtmpl), None)
        style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        # a bare style name refers to a shipped map-cmdline.<name> file
        if not os.path.split(mapfile)[0]:
            builtin = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if builtin:
                mapfile = builtin
        return logtemplatespec(None, mapfile)

    if not tmpl:
        return logtemplatespec(None, None)

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1681 1681
def makelogtemplater(ui, repo, tmpl, buffered=False):
    """Create a changeset_templater from a literal template 'tmpl'"""
    return changeset_templater(ui, repo, logtemplatespec(tmpl, None),
                               matchfn=None, diffopts={}, buffered=buffered)
1687 1687
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a matcher is only needed when a patch or diffstat is to be shown
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)
    else:
        matchfn = None

    # 'json' is handled by a dedicated printer rather than the templater
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
    if spec.tmpl or spec.mapfile:
        return changeset_templater(ui, repo, spec, matchfn, opts, buffered)
    return changeset_printer(ui, repo, matchfn, opts, buffered)
1713 1713
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function.

    Writes the marker's fields (optional index, precursor node, successor
    nodes, flags, parent nodes, date, metadata) through the formatter fm,
    followed by a newline."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('precnode', '%s ', hex(marker.precnode()))
    succs = marker.succnodes()
    # successors may be empty (pruned changeset); only written when present
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above, so drop it from the metadata dict
    meta = marker.metadata().copy()
    meta.pop('date', None)
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1734 1734
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matched = {}  # rev -> date tuple of changesets satisfying the spec

    def prep(ctx, fns):
        # record every changeset whose timestamp satisfies the date spec
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    # return the first matching revision produced by the walk as a string
    for ctx in walkchangerevs(repo, scmutil.matchall(repo), {'rev': None},
                              prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
1755 1755
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double from windowsize up to sizelimit.

    Once the size reaches (or starts at) sizelimit, that value is yielded
    forever; the generator never terminates.
    """
    while windowsize < sizelimit:
        yield windowsize
        windowsize *= 2
    while True:
        yield windowsize
1761 1761
class FileWalkError(Exception):
    """Raised when the file history can't be walked using filelogs alone."""
    pass
1764 1764
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    Side effect: fncache is populated with rev -> [filename] entries for
    the revisions added to the returned set.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for each explicitly matched
        # file, then for every rename source appended to "copies" while
        # the outer loop runs
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1861 1861
class _followfilter(object):
    """Stateful predicate selecting revisions connected to the first
    revision it is fed: descendants when subsequent revisions are
    larger than the anchor, ancestors when they are smaller."""

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # honor --follow-first by restricting to the first parent
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            return [p for p in self.repo.changelog.parentrevs(rev)
                    if p != nullrev]

        # the first revision seen anchors the walk
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: accept descendants of already-accepted revisions
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: accept ancestors, shrinking/growing the root set
            # as each accepted revision is replaced by its parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1899 1899
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    # bind once; used both here and inside iterate()
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): if "wanted" is a plain set here (fast
                # filelog path above), ``set - list`` raises TypeError;
                # verify the --prune behavior on that path.
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # gather one window of wanted revisions
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare the window in forward (ascending) order ...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ... then yield it in the caller's requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2037 2037
def _makefollowlogfilematcher(repo, files, followfirst):
    """Return a rev -> matcher callable for --patch/--stat with --follow.

    The matcher for a revision selects the names that *files* (or their
    ancestors) had in that revision.
    """
    # Map rev -> set of file names, built lazily on the first lookup by
    # replaying the graph traversal already done by the --follow revset
    # and relating revs to file names (not strictly "correct", but good
    # enough).
    namesbyrev = {}
    populated = [False]
    pctx = repo['.']

    def _populate():
        for fn in files:
            fctx = pctx[fn]
            namesbyrev.setdefault(fctx.introrev(), set()).add(fctx.path())
            for anc in fctx.ancestors(followfirst=followfirst):
                namesbyrev.setdefault(anc.rev(), set()).add(anc.path())

    def filematcher(rev):
        if not populated[0]:
            # Lazy initialization on first use
            populated[0] = True
            _populate()
        return scmutil.matchfiles(repo, namesbyrev.get(rev, []))

    return filematcher
2065 2065
2066 2066 def _makenofollowlogfilematcher(repo, pats, opts):
2067 2067 '''hook for extensions to override the filematcher for non-follow cases'''
2068 2068 return None
2069 2069
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # log option name -> (revset template, join operator for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    # copy: this function adds synthetic "_"-prefixed keys to opts below
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # translate every truthy recognized option into a revset fragment and
    # join them all with "and"
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2221 2221
def _logrevs(repo, opts):
    """Return the default smartset of revisions for the log command."""
    # The default --rev value depends on --follow, while --follow
    # behavior depends on the revisions resolved from --rev...
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # null working-directory parent: nothing to follow
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
2236 2236
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) for graph log.

    revs is an iterable of revision numbers; expr is the revset string
    built from log options and file patterns (or None) used to filter
    'revs'; filematcher is None unless --stat or --patch was passed, in
    which case it maps a revision number to a match object selecting
    the files to be detailed for that revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev') and not (revs.isdescending() or revs.istopo()):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # honor --limit by materializing at most 'limit' revisions
        revs = smartset.baseset(list(itertools.islice(revs, limit)))

    return revs, expr, filematcher
2267 2267
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) for the log command.

    revs is an iterable of revision numbers; expr is the revset string
    built from log options and file patterns (or None) used to filter
    'revs'; filematcher is None unless --stat or --patch was passed, in
    which case it maps a revision number to a match object selecting
    the files to be detailed for that revision.
    """
    maxcount = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if maxcount is not None:
        # honor --limit by materializing at most 'maxcount' revisions
        head = []
        for idx, rev in enumerate(revs):
            if idx >= maxcount:
                break
            head.append(rev)
        revs = smartset.baseset(head)

    return revs, expr, filematcher
2293 2293
def _graphnodeformatter(ui, displayer):
    """Return a (repo, ctx) -> str callable rendering the graph node
    character, honoring the ui.graphnodetemplate config."""
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode  # fast path for "{graphnode}"

    spec = templater.unquotestring(spec)
    # NOTE: the scraped diff left both the old named-template call
    # (maketemplater(ui, 'graphnode', spec)) and the new one in the
    # text; keep only the new unnamed-template API introduced by this
    # changeset ("templater: add simple interface for unnamed template").
    templ = formatter.maketemplater(ui, spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache  # reuse cache of slow templates
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        # render with the new templ.render() API (replaces
        # templater.stringify(templ('graphnode', **props)))
        return templ.render(props)
    return formatnode
2314 2314
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Show the revisions of *dag* through *displayer* next to an ASCII
    revision graph.

    getrenamed, if given, maps (filename, rev) to rename info so copies
    can be reported; filematcher, if given, maps a rev to the match
    object restricting which files are detailed for that rev.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

        # experimental config: experimental.graphshorten
        state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # render the changeset into the displayer's buffer, then pull the
        # buffered text back out so it can be merged with the graph edges
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2362 2362
def graphlog(ui, repo, pats, opts):
    """Run the graph log display; parameters are identical to the log
    command's."""
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    walker = graphmod.dagwalker(repo, revs)

    renamefn = None
    if opts.get('copies'):
        # resolve renames no further than the highest requested revision
        stoprev = None
        if opts.get('rev'):
            stoprev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        renamefn = templatekw.getrenamedfn(repo, endrev=stoprev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, walker, displayer, graphmod.asciiedges, renamefn,
                 filematcher)
2379 2379
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph is set."""
    for op in ["newest_first"]:
        if opts.get(op):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % op.replace("_", "-"))
2385 2385
def graphrevs(repo, nodes, opts):
    """Return a graphmod node source over *nodes*, newest first and
    truncated to --limit (reverses *nodes* in place)."""
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)
2392 2392
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by *match* for addition, recursing into
    subrepositories; return the list of names that could not be added.

    With explicitonly, only exactly-named files are considered.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # collect names rejected by the matcher while still reporting them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for case/portability collisions before adding
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2435 2435
def addwebdirpath(repo, serverpath, webconf):
    """Register *repo* under *serverpath* in the hgweb config dict
    *webconf*, recursing into every subrepository that ever appeared in
    a .hgsub file."""
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    # any revision touching .hgsub may declare subrepositories
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2444 2444
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by *match* (recursing into subrepos)
    without deleting them; return (bad, forgot) lists of names that
    failed and names actually forgotten."""
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # collect names rejected by the matcher while still reporting them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly-named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2492 2492
def files(ui, ctx, m, fm, fmt, subrepos):
    """Write the files of *ctx* selected by matcher *m* through
    formatter *fm* using template *fmt*; return 0 if anything was
    listed, 1 otherwise."""
    rev = ctx.rev()
    exitcode = 1
    dirstate = ctx.repo().dirstate

    for path in ctx.matches(m):
        # in the working directory, skip files marked for removal
        if rev is None and dirstate[path] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fctx = ctx[path]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(abspath=path)
        fm.write('path', fmt, m.rel(path))
        exitcode = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    exitcode = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return exitcode
2522 2522
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Implement the remove command over matcher *m* (recursing into
    subrepos): forget matched files and, unless --after, unlink them
    from disk.  Return 1 if any file was skipped with a warning, else 0.

    When *warnings* is passed (recursive subrepo calls), messages are
    appended to it instead of being printed here.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        # only the outermost call prints the accumulated warnings
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    # NOTE: "list" below shadows the builtin; kept for byte-compatibility
    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only forget files already gone from disk
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                             ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                             " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2640 2640
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of files in ctx selected by matcher.

    Output goes through the formatter basefm (reopened per file when a
    fntemplate is given).  Subrepositories selected by the matcher are
    handled recursively.  Returns 0 if at least one file was written,
    1 otherwise.
    """
    err = 1

    def emit(path):
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
        else:
            filename = None
        with formatter.maybereopen(basefm, filename, opts) as fm:
            data = ctx[path].data()
            if opts.get('decode'):
                data = repo.wwritedata(path, data)
            fm.startitem()
            fm.write('data', '%s', data)
            fm.data(abspath=path, path=matcher.rel(path))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        path = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(path)[0]:
                emit(path)
                return 0
        except KeyError:
            pass

    for abs in ctx.walk(matcher):
        emit(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path), **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2687 2687
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, matcher, "", opts):
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2704 2704
def samefile(f, ctx1, ctx2):
    """Return True if file f is identical in ctx1 and ctx2.

    "Identical" means either present in both with equal content and equal
    flags, or present in neither.  A file present in only one of the two
    contexts is never the same.
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # same only when missing from both sides
        return not in2
    if not in2:
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2716 2716
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in any working directory changes.

    First commits the working directory changes (if any) as a temporary
    changeset, then creates a replacement of ``old`` on top of ``old``'s
    first parent combining both.  Returns the node of the amended
    changeset, or ``old``'s own node when nothing would change.

    When obsolescence markers are enabled the rewritten changesets are
    obsoleted; otherwise they are stripped from the repository.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # fix: the previous "if old.p2:" tested the bound method
                # object itself, which is always truthy; test whether a
                # second parent actually exists instead
                if old.p2().node() != nullid:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())
                files = [f for f in files if not samefile(f, ctx, base)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            # commit the new changeset with the phase of the old one
            # (or secret when requested), then restore the config
            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs, operation='amend')
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2899 2899
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, opening the editor only when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2905 2905
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Invoke the user's editor to obtain a commit message for ctx.

    The editor buffer is seeded from the '[committemplate]' configuration
    entry matching ``editform`` (most specific dotted suffix wins) or,
    failing that, from the plain text built by buildcommittext().
    ``finishdesc``, when given, post-processes the edited text.  Raises
    error.Abort when the resulting message is empty or, when
    ``unchangedmessagedetection`` is set, when the user left the seeded
    template untouched.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # find the most specific '[committemplate]' entry: try e.g.
    # 'changeset.commit.amend', then 'changeset.commit', then 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            tmpl = templater.unquotestring(tmpl)
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path)
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    # NOTE: _linebelow is a module-level pattern defined elsewhere in this file
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the "HG:" helper lines added by buildcommittext/hgprefix
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2956 2956
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the '[committemplate]' template ``tmpl`` for changeset ctx."""
    ui = repo.ui
    spec = _lookuplogtemplate(ui, tmpl, None)
    t = changeset_templater(ui, repo, spec, None, {}, False)

    # make every other [committemplate] entry available as a named template
    for key, tmplstr in ui.configitems('committemplate'):
        if key != 'changeset':
            t.t.cache[key] = tmplstr

    # ensure that extramsg is a string
    extramsg = extramsg or ''

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2972 2972
def hgprefix(msg):
    """Prefix every non-empty line of msg with "HG: ", dropping empty lines."""
    lines = [line for line in msg.split("\n") if line]
    return "\n".join("HG: %s" % line for line in lines)
2975 2975
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the plain-text editor seed for committing ctx.

    The text starts with ctx's current description (if any) followed by
    "HG:"-prefixed helper lines describing user, branch, bookmark, subrepos
    and the files touched; those lines are stripped after editing.
    """
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    add = lines.append
    if ctx.description():
        add(ctx.description())
    add("")
    add("") # Empty line between message and comments.
    add(hgprefix(_("Enter commit message."
                   " Lines beginning with 'HG:' are removed.")))
    add(hgprefix(extramsg))
    add("HG: --")
    add(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        add(hgprefix(_("branch merge")))
    if ctx.branch():
        add(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        add(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        add(hgprefix(_("subrepo %s") % s))
    for f in added:
        add(hgprefix(_("added %s") % f))
    for f in modified:
        add(hgprefix(_("changed %s") % f))
    for f in removed:
        add(hgprefix(_("removed %s") % f))
    if not added and not modified and not removed:
        add(hgprefix(_("no files changed")))
    add("")

    return "\n".join(lines)
3003 3003
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print informational messages after a commit: "created new head",
    "reopening closed branch head" and the committed changeset id."""
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if not opts.get('amend') and bheads and node not in bheads:
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases
        headparents = [p for p in parents
                       if p.node() in bheads and p.branch() == branch]
        if not headparents:
            repo.ui.status(_('created new head\n'))

    if not opts.get('close_branch'):
        for p in parents:
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % p)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3051 3051
def postcommitstatus(repo, pats, opts):
    """Return working-directory status restricted to the committed patterns."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
3054 3054
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Revert matched files in the working directory to their state in ctx.

    ``parents`` is the (parent, p2) pair of the working directory.  The
    function walks both the dirstate and the target manifest to classify
    every matched file (modified, added, removed, deleted, unknown, clean,
    ...), dispatches each file to an action (revert/add/remove/drop/forget/
    undelete) with an associated backup strategy, and finally delegates the
    actual filesystem and dirstate changes to _performrevert().
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    # when reverting to the second merge parent, use it as the reference
    # parent for the dirstate comparisons below
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress "file not found" style warnings for paths that
                # are already known, belong to a subrepo, or are directories
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2 # unconditionally do backup
        check = 1 # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, basestring):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3337 3337
3338 3338 def _revertprefetch(repo, ctx, *files):
3339 3339 """Let extension changing the storage layer prefetch content"""
3340 3340 pass
3341 3341
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    ``parents`` is the (parent, p2) pair of the working directory, ``ctx``
    the target revision, and ``actions`` the file-lists-by-action mapping
    built by revert().  In ``interactive`` mode the user is prompted per
    file/hunk, and files in ``tobackup`` get a .orig backup before any hunk
    touching them is applied.
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []
    # files the user declines to touch are excluded from the matcher below
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write f's content (and flags) from ctx into the working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # unlink f (best effort) and mark it removed in the dirstate
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'revert'
            reversehunks = repo.ui.configbool('experimental',
                                              'revertalternateinteractivemode',
                                              True)
        # diff direction decides whether selected hunks must be reversed
        # before being applied to the working directory
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy/rename metadata for all files we (re)created
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3480 3480
class command(registrar.command):
    """Deprecated command decorator; use registrar.command instead.

    Identical to registrar.command except that it tags each registered
    function so extensions.py can warn about extensions still using it
    (see the _deprecatedregistrar flag below).
    """
    def _doregister(self, func, name, *args, **kwargs):
        func._deprecatedregistrar = True  # flag for deprecwarn in extensions.py
        return super(command, self)._doregister(func, name, *args, **kwargs)
3485 3485
# Hook points for extensions, and the table of in-progress multistep
# operation state files consulted by checkunfinished()/clearunfinished().

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3514 3514
def checkunfinished(repo, commit=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    It's probably good to check this right before bailifchanged().
    With commit=True, states that allow committing are not considered
    blocking.
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        skip = commit and allowcommit
        if not skip and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3525 3525
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # First pass: refuse to proceed if any non-clearable state exists.
    for state in unfinishedstates:
        statefile, clearable, allowcommit, msg, hint = state
        if repo.vfs.exists(statefile) and not clearable:
            raise error.Abort(msg, hint=hint)
    # Second pass: remove every clearable state file that is present.
    for state in unfinishedstates:
        statefile, clearable = state[0], state[1]
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3536 3536
# (state file in .hg/, command that finishes the operation it belongs to);
# consulted by howtocontinue() below.
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3541 3541
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    '''
    contmsg = _("continue: %s")
    # An operation state file takes priority over a merely dirty tree.
    for statefile, cmdmsg in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return (contmsg % cmdmsg, True)
    wctx = repo[None]
    dirty = any(repo.status()) or any(wctx.sub(s).dirty()
                                      for s in wctx.substate)
    if dirty:
        return (contmsg % _("hg commit"), False)
    return (None, None)
3562 3562
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, howtocontinue will yield
    repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3577 3577
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,2204 +1,2204 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import operator
13 13 import os
14 14 import random
15 15 import socket
16 16 import string
17 17 import sys
18 18 import tempfile
19 19 import time
20 20
21 21 from .i18n import _
22 22 from .node import (
23 23 bin,
24 24 hex,
25 25 nullhex,
26 26 nullid,
27 27 nullrev,
28 28 short,
29 29 )
30 30 from . import (
31 31 bundle2,
32 32 changegroup,
33 33 cmdutil,
34 34 color,
35 35 context,
36 36 dagparser,
37 37 dagutil,
38 38 encoding,
39 39 error,
40 40 exchange,
41 41 extensions,
42 42 filemerge,
43 43 fileset,
44 44 formatter,
45 45 hg,
46 46 localrepo,
47 47 lock as lockmod,
48 48 merge as mergemod,
49 49 obsolete,
50 50 phases,
51 51 policy,
52 52 pvec,
53 53 pycompat,
54 54 registrar,
55 55 repair,
56 56 revlog,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 setdiscovery,
61 61 simplemerge,
62 62 smartset,
63 63 sslutil,
64 64 streamclone,
65 65 templater,
66 66 treediscovery,
67 67 upgrade,
68 68 util,
69 69 vfs as vfsmod,
70 70 )
71 71
72 72 release = lockmod.release
73 73
# decorator that collects the debug* commands defined below into a table
command = registrar.command()
75 75
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) not in (2, 3):
        raise error.Abort(_('either two or three arguments required'))
    if len(args) == 3:
        # an explicit index file was given: open it as a standalone revlog
        index, rev1, rev2 = args
        rl = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
        lookup = rl.lookup
    else:
        # no index file: fall back to the current repository's changelog
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        rl = repo.changelog
        lookup = repo.lookup
    ancestor = rl.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (rl.rev(ancestor), hex(ancestor)))
94 94
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file

    Reads the stream clone bundle at 'fname' and applies it to the
    current repository.
    """
    # Use a context manager so the bundle file is closed even if apply()
    # raises; this mirrors how debugbundle uses hg.openpath(). The
    # original left the file handle open on every code path.
    with hg.openpath(ui, fname) as f:
        gen = exchange.readbundle(ui, f, fname)
        gen.apply(repo)
101 101
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
     ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
     ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # only a completely empty repo may be built from scratch
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG (first parse pass, used only so the
    # progress bar below has a total)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1                 # rev id of the last node created
        atbranch = 'default'    # branch applied to subsequent nodes
        nodeids = []            # rev id -> node hash, for backrefs
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        # second parse pass: actually commit one memctx per 'n' event
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % str(data)))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge node: three-way merge the file contents of
                        # both parents against their common ancestor
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # tag this revision's line so every rev changes the file
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, mergedtext)

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
                    if len(ps) > 1:
                        # carry the second parent's nf* files through merges
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                # resolve parent backrefs to node hashes
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # local tag for the preceding node
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # switch named branch for subsequent nodes
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.vfs.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock, wlock)
253 253
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the contents of changegroup unbundler 'gen' to the ui

    With 'all', every delta chunk of the changelog, manifest and each
    filelog is listed; otherwise only the changelog node hashes are
    printed. 'indent' prefixes each output line (used when nested inside
    bundle2 output).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # consume and print delta chunks until the empty terminator {}
            ui.write("\n%s%s\n" % (indent_string, named))
            chain = None
            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                ui.write("%s%s %s %s %s %s %s\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))
                chain = node

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        # terse mode: list only the changelog node hashes
        chunkdata = gen.changelogheader()
        chain = None
        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
            node = chunkdata['node']
            ui.write("%s%s\n" % (indent_string, hex(node)))
            chain = node
291 291
def _debugobsmarkers(ui, data, all=None, indent=0, **opts):
    """display version and markers contained in 'data'"""
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # unknown marker format: report it instead of crashing so the rest
        # of the bundle can still be dumped
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %s (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        # reuse the debugobsolete formatter so the output matches that command
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsolete.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
312 312
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % repr(gen.params)))
    # optional filter: only dump parts whose type was requested via --part-type
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        ui.write('%s -- %r\n' % (part.type, repr(part.params)))
        if part.type == 'changegroup':
            # changegroup parts embed a full changegroup; dump it indented
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            _debugobsmarkers(ui, part.read(), all=all, indent=4, **opts)
329 329
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'part-type', [], _('show only the named part type')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # --spec: report only the bundlespec, don't unpack the contents
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
348 348
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks dirstate entries against the manifests of both working
    directory parents, warns about each inconsistency found, and aborts
    if any were detected.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # Don't name this local 'error': doing so shadowed the imported
        # 'error' module, making the error.Abort lookup below resolve
        # against the message string and fail with AttributeError.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
376 376
@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % ui._colormode)
    # dispatch to either the style listing or the raw color listing
    displayer = _debugdisplaystyle if opts.get('style') else _debugdisplaycolor
    return displayer(ui)
387 387
def _debugdisplaycolor(ui):
    """write one line per known color/effect, rendered in that style"""
    # work on a copy so the caller's ui style table is not clobbered
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # terminfo mode: also expose user-defined color.*/terminfo.* config
        # entries under their short (prefix-stripped) names
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
405 405
def _debugdisplaystyle(ui):
    """write one line per configured style with its effects, each effect
    rendered in its own style"""
    ui.write(_('available style:\n'))
    # NOTE(review): assumes at least one style is configured; max() would
    # raise on an empty ui._styles -- confirm callers guarantee this
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            # pad so the effect lists start in the same column
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')
417 417
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
435 435
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
     ('b', 'branches', None, _('annotate with branch names')),
     ('', 'dots', None, _('use dots for runs')),
     ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # explicit index file: emit that revlog's DAG, labeling any
        # requested revs as rN
        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield ('n', (rev, parents)) nodes and ('l', ...) labels for
            # dagparser.dagtextlines below
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # no index file: emit the repo's changelog DAG, optionally with
        # tag labels and branch annotations
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an ('a', branch) event whenever the branch changes
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")
498 498
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        # with -c/-m/--dir the sole positional argument is the revision
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
513 513
@command('debugdate',
        [('e', 'extended', None, _('try extended date formats'))],
        _('[-e] DATE [RANGE]'),
        norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # parse with the extended format list only when -e was given
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    ui.write(("internal: %s %s\n") % parsed)
    ui.write(("standard: %s\n") % util.datestr(parsed))
    if range:
        matcher = util.matchdate(range)
        ui.write(("match: %s\n") % matcher(parsed[0]))
529 529
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                    (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                    how much unrelated data is needed to load this delta chain
    """
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.FLAG_GENERALDELTA

    def revinfo(rev):
        # summarize one revision from the raw index entry; e[1]/e[2] are
        # used as compressed/uncompressed length, e[3] as the delta base,
        # e[5]/e[6] as the parents
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # with generaldelta the base can be any revision: classify it
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta a delta is always against the previous rev
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        # total compressed size of every revision in the delta chain
        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # number chains consecutively by their (unique) base revision
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = r.start(chainbase)
        revstart = r.start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length one: no previous revision
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)

    fm.end()
630 630
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get('nodates')
    datesort = opts.get('datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        # ent is (state, mode, size, mtime); mtime of -1 means unset
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
        if ent[1] & 0o20000:
            # this mode bit marks symlinks
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
661 661
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
      _('use old-style discovery with non-heads included')),
    ] + cmdutil.remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        # run one discovery round and report the computed common heads
        if opts.get('old'):
            if localheads:
                raise error.Abort('cannot use localheads with old style '
                                  'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                # reduce the common set to its heads
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    serverlogs = opts.get('serverlog')
    if serverlogs:
        # replay discovery requests recorded in server log files
        for filename in serverlogs:
            with open(filename, 'r') as logfile:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
727 727
@command('debugextensions', cmdutil.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # annotate the name with the extension's tested-with status
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                 _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()
772 772
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    if ui.verbose:
        # in verbose mode, show the parse tree before evaluating
        parsetree = fileset.parse(expr)
        ui.note(fileset.prettyformat(parsetree), "\n")

    for filename in ctx.getfileset(expr):
        ui.write("%s\n" % filename)
785 785
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('exec: %s\n') % ('yes' if util.checkexec(path) else 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % ('yes' if util.checklink(path) else 'no'))
    ui.write(('hardlink: %s\n') % ('yes' if util.checknlink(path) else 'no'))
    # probe case sensitivity with a throwaway file; unreadable/unwritable
    # paths simply report '(unknown)'
    casesensitive = '(unknown)'
    try:
        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = 'yes' if util.fscasesensitive(f.name) else 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
800 800
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # translate the hex ids given on the command line into binary nodes
    # TODO: get desired bundlecaps from command line.
    args = {'bundlecaps': None}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    bundle = peer.getbundle('debug', **args)

    # map the user-visible compression name onto the internal bundle type
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(opts.get('type', 'bzip2').lower())
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
834 834
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % repr(ignore))
        return

    for f in files:
        nf = util.normpath(f)
        ignored = None
        ignoredata = None
        if nf != '.':
            if ignore(nf):
                # the file itself matches an ignore rule
                ignored = nf
                ignoredata = repo.dirstate._ignorefileandline(nf)
            else:
                # otherwise check whether any containing directory is ignored
                for p in util.finddirs(nf):
                    if ignore(p):
                        ignored = p
                        ignoredata = repo.dirstate._ignorefileandline(p)
                        break
        if not ignored:
            ui.write(_("%s is not ignored\n") % f)
            continue
        if ignored == nf:
            ui.write(_("%s is ignored\n") % f)
        else:
            ui.write(_("%s is ignored because of "
                       "containing folder %s\n")
                     % (f, ignored))
        ignorefile, lineno, line = ignoredata
        ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                 % (ignorefile, lineno, line))
875 875
@command('debugindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # generaldelta revlogs store a delta parent rather than a chain base,
    # so label the column accordingly
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = ' base'

    # full 40-char hashes with --debug, 12-char abbreviations otherwise
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write((" rev offset length " + basehdr + " linkrev"
                 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write((" rev flag offset length"
                 " size " + basehdr + " link p1 p2"
                 " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # tolerate damaged entries; report null parents instead
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
931 931
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    rl = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in rl:
        # emit one parent->child edge per real (non-null) parent
        p1, p2 = rl.parents(rl.node(rev))
        ui.write("\t%d -> %d\n" % (rl.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (rl.rev(p2), rev))
    ui.write("}\n")
945 945
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    def writetemp(contents):
        # write contents to a fresh temp file and return its path; the
        # caller is responsible for removing it
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        encoding.fromlocal("test")
    except error.Abort as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        # only try importing the C extensions when the policy allows them
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = inst
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))

    # templates
    # 'p' doubles as the success flag: it is cleared below whenever the
    # default template is missing or fails to load
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = inst
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
    # 'vi' is the built-in fallback, so a missing 'vi' means "no editor
    # configured" rather than "configured editor not found"
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editor)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1112 1112
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # one boolean per requested id, rendered as a string of 0/1 digits
    known = peer.known([bin(nodeid) for nodeid in ids])
    ui.write("%s\n" % "".join('1' if flag else '0' for flag in known))
1125 1125
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # thin alias: all completion logic lives in debugnamecomplete
    debugnamecomplete(ui, repo, *args)
1130 1130
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink('lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink('wlock')
    # fix: the second operand previously re-tested 'force_lock', so using
    # -W/--force-wlock alone fell through to the lock reporting below and
    # returned its status instead of 0
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # Report the state of one lock file. Returns 1 if the lock is held,
        # 0 if it is free.
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we acquired it, so nobody else holds it; drop it again
            l.release()
        else:
            try:
                stat = vfs.lstat(name)
                age = now - stat.st_mtime
                user = util.username(stat.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    # lock contents are "host:pid"
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1202 1202
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # render the null hash as the literal string 'null'
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # dump either the v1 or v2 record list (closed over below)
        ui.write(('* version %s records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                # merge driver record: "driver\0state"
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file record; fields are NUL-separated, v2 adds the
                # other-node field before the flags
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write((' ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write((' other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # file extras: "filename\0key\0value\0key\0value..."
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # merge labels: local, other and (optionally) base
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write((' local: %s\n' % labels[0]))
                ui.write((' other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((' base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types sort in 'order'; everything else after, by value
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
1301 1301
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    # collect names from every namespace except 'branches'; since we
    # previously only listed open branches, branches are handled specially
    # below
    allnames = set()
    for nsname, ns in repo.names.iteritems():
        if nsname != 'branches':
            allnames.update(ns.listnames(repo))
    allnames.update(branch for (branch, heads, tip, closed)
                    in repo.branchmap().iterbranches() if not closed)
    # with no arguments, the empty prefix matches everything
    prefixes = args or ['']
    matches = set()
    for prefix in prefixes:
        matches.update(n for n in allnames if n.startswith(prefix))
    ui.write('\n'.join(sorted(matches)))
    ui.write('\n')
1321 1321
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'exclusive', False, _('restrict display to markers only '
                                    'relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
        _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # --delete mode: remove markers by index and return early
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # creation mode: record one marker precursor -> successors
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = util.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') % exc)
            finally:
                # release (without close) aborts the transaction if it was
                # not committed above
                tr.release()
        finally:
            l.release()
    else:
        # display mode
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsolete.getmarkers(repo, nodes=nodes,
                                               exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsolete.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # indices must be computed over ALL markers even when only a
            # subset is displayed
            markerstoiter = obsolete.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1435 1435
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completing 'path', restricted to dirstate
        # entries whose state character is in 'acceptable'.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # anything outside the repository cannot be completed
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate paths always use '/', so translate OS separators
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # otherwise complete only up to the next path separator
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # build the set of acceptable dirstate states from the options;
    # empty means "no filter" and falls back to 'nmar' below
    acceptable = ''
    if opts['normal']:
        acceptable += 'nm'
    if opts['added']:
        acceptable += 'a'
    if opts['removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1500 1500
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    # --tool takes priority over everything else, emulated via a temporary
    # ui.forcemerge override for the duration of the examination
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (opts['tool']))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (hgmerge))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (uimerge))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # without --debug, suppress _picktool's own messages so only
                # the "FILE = MERGETOOL" lines are emitted
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
1578 1578
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    peer = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # listing mode: dump every key/value pair in the namespace
        for name, value in sorted(peer.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (util.escapestr(name),
                                   util.escapestr(value)))
        return
    # update mode: conditionally replace old value with new one
    key, old, new = keyinfo
    ok = peer.pushkey(namespace, key, old, new)
    ui.status(str(ok) + '\n')
    return not ok
1599 1599
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    # Compare the parent vectors (pvecs) of two revisions and print their
    # relationship: '=' equal, '>' / '<' ancestor-descendant, '|' unrelated.
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    # NOTE(review): if none of the four comparisons above holds, 'rel' is
    # never bound and the last ui.write raises NameError -- presumably the
    # pvec comparison operators are exhaustive, but confirm.
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
1620 1620
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        changedfiles = None
        if opts.get('minimal'):
            # See command doc for what minimal does.
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # files the manifest tracks but the dirstate does not ...
            manifestonly = inmanifest - indirstate
            # ... plus dirstate-only files that are not pending adds
            dsonly = indirstate - inmanifest
            dsnotadded = {f for f in dsonly if ds[f] != 'a'}
            changedfiles = manifestonly | dsnotadded

        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1658 1658
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # thin CLI shim: all the work happens in repair.rebuildfncache()
    repair.rebuildfncache(ui, repo)
1663 1663
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        renamed = fctx.filelog().renamed(fctx.filenode())
        rel = matcher.rel(path)
        if not renamed:
            ui.write(_("%s not renamed\n") % rel)
        else:
            srcpath, srcnode = renamed
            ui.write(_("%s renamed from %s:%s\n") % (rel, srcpath,
                                                     hex(srcnode)))
1680 1680
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # --dump mode: print one raw index row per revision and stop
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # full revision: treat it as its own delta base
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # track current head set incrementally: parents stop being heads
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                # NB: py2 integer division -- ratio is truncated
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # statistics mode: decode the revlog version/flag word first
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    # counters for delta-base classification
    nummerges = 0
    numfull = 0
    numprev = 0
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []

    # [min, max, total] accumulators (total becomes average later)
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # fold one sample into a [min, max, total] accumulator
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # stored as a full snapshot
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            # stored as a delta: classify the base (prev / p1 / p2 / other)
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

        # Obtain data on the raw chunks in the revlog.
        segment = r._getsegmentforrevs(rev, rev)[1]
        if segment:
            chunktype = segment[0]
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # turn the totals into averages (py2 integer division)
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    if numrevs - numfull > 0:
        deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # width-parameterized format-string templates
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # decimal column sized to fit 'max'
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # decimal column plus a percentage, with optional extra padding
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # (value, percent-of-total) pair for the pcfmtstr templates
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # label a chunk-type row; printable types also get their hex form
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in string.ascii_letters:
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
1900 1900
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    # the parse pipeline, applied in order; each stage transforms the tree
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # which stage trees to print: always, or only when the tree changed
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=repo.__contains__)
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # evaluate both the analyzed and optimized trees and diff the results
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
            ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # print a unified-style diff of the two revision lists
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%s\n' % c, label='diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%s\n' % c, label='diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %s\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%s\n" % c)
2000 2000
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    # second parent defaults to the null revision when omitted
    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)
2018 2018
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # dump the subrepo state of a revision, sorted by path
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        source = state[0]
        revision = state[1]
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % source)
        ui.write((' revision %s\n') % revision)
2029 2029
@command('debugsuccessorssets',
    [],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    if ui.debug():
        # full-length hashes in debug mode, short forms otherwise
        def ctx2str(ctx):
            return ctx.hex()
        node2str = hex
    else:
        ctx2str = str
        node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n' % ctx2str(ctx))
        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
            if succsset:
                rendered = ' '.join(node2str(node) for node in succsset)
                ui.write(' ')
                ui.write(rendered)
            ui.write('\n')
2083 2083
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # -r needs a repository even though the command is optionalrepo
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts['rev'])

    # parse -D KEY=VALUE definitions into template properties
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            if not k or k == 'ui':
                # 'ui' is reserved: it is injected into props below
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # print the raw parse tree, and the alias-expanded one if different
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        # generic template: render once with the -D properties only
        t = formatter.maketemplater(ui, tmpl)
        props['ui'] = ui
        ui.write(t.render(props))
    else:
        # log template: render per changeset via the log displayer
        displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
        for r in revs:
            displayer.show(repo[r], **props)
        displayer.close()
2132 2132
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # hold both the working-copy lock and the store lock while updating
    with repo.wlock(), repo.lock():
        repo.updatecaches()
2139 2139
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    # thin CLI shim: all the work happens in the upgrade module
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2164 2164
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    m = scmutil.match(repo[None], pats, opts)
    ui.write(('matcher: %r\n' % m))
    items = list(repo[None].walk(m))
    if not items:
        return
    # optional path normalization when ui.slash is set on non-'/' platforms
    f = lambda fn: fn
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        f = lambda fn: util.normpath(fn)
    # column widths sized to the longest absolute and relative paths
    # (loop variable renamed from 'abs', which shadowed the builtin)
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max(len(fname) for fname in items),
        max(len(m.rel(fname)) for fname in items))
    for fname in items:
        line = fmt % (fname, f(m.rel(fname)),
                      m.exact(fname) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
2183 2183
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
     ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    peer = hg.peer(ui, opts, repopath)
    # drop the generic remote options; only command-specific ones remain
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    # forward only the options that were actually set
    args = dict((k, v) for k, v in opts.iteritems() if v)
    # run twice to check that we don't mess up the stream for the next command
    res1 = peer.debugwireargs(*vals, **args)
    res2 = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
@@ -1,744 +1,744 b''
1 1 # filemerge.py - file-level merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import filecmp
11 11 import os
12 12 import re
13 13 import tempfile
14 14
15 15 from .i18n import _
16 16 from .node import nullid, short
17 17
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 formatter,
22 22 match,
23 23 pycompat,
24 24 scmutil,
25 25 simplemerge,
26 26 tagmerge,
27 27 templatekw,
28 28 templater,
29 29 util,
30 30 )
31 31
def _toolstr(ui, tool, part, default=""):
    # read the string config value "merge-tools.<tool>.<part>"
    return ui.config("merge-tools", tool + "." + part, default)
34 34
def _toolbool(ui, tool, part, default=False):
    # read the boolean config value "merge-tools.<tool>.<part>"
    return ui.configbool("merge-tools", tool + "." + part, default)
37 37
def _toollist(ui, tool, part, default=None):
    """Read the list-valued config "merge-tools.<tool>.<part>"."""
    # avoid a mutable default argument; [] is the effective default
    default = [] if default is None else default
    return ui.configlist("merge-tools", tool + "." + part, default)
42 42
# registry of internal merge tools, keyed by both ':name' and 'internal:name'
internals = {}
# Merge tools to document.
internalsdoc = {}

# internal tool merge types
nomerge = None
mergeonly = 'mergeonly' # just the full merge, no premerge
fullmerge = 'fullmerge' # both premerge and merge

# prompt texts for change/delete conflicts; the '$$'-separated suffix
# carries the choice labels for ui.promptchoice-style prompts
_localchangedotherdeletedmsg = _(
    "local%(l)s changed %(fd)s which other%(o)s deleted\n"
    "use (c)hanged version, (d)elete, or leave (u)nresolved?"
    "$$ &Changed $$ &Delete $$ &Unresolved")

_otherchangedlocaldeletedmsg = _(
    "other%(o)s changed %(fd)s which local%(l)s deleted\n"
    "use (c)hanged version, leave (d)eleted, or "
    "leave (u)nresolved?"
    "$$ &Changed $$ &Deleted $$ &Unresolved")
62 62
class absentfilectx(object):
    """Represents a file that's ostensibly in a context but is actually not
    present in it.

    This is here because it's very specific to the filemerge code for now --
    other code is likely going to break with the values this returns."""
    def __init__(self, ctx, f):
        self._ctx = ctx  # the changectx this file is absent from
        self._f = f      # path of the absent file

    def path(self):
        return self._f

    def size(self):
        # absent files have no size
        return None

    def data(self):
        # absent files have no content
        return None

    def filenode(self):
        # report the null node since there is no real file revision
        return nullid

    # marker telling comparison code that this class supplies its own cmp()
    _customcmp = True
    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # equal only to another absent file at the same path in the same
        # context.
        # NOTE(review): this calls self.ctx(), but the class only defines
        # changectx() -- presumably cmp() is only ever invoked with the
        # other operand absent too; confirm against callers.
        return not (fctx.isabsent() and
                    fctx.ctx() == self.ctx() and
                    fctx.path() == self.path())

    def flags(self):
        # no exec/symlink flags on an absent file
        return ''

    def changectx(self):
        return self._ctx

    def isbinary(self):
        return False

    def isabsent(self):
        return True
106 106
def internaltool(name, mergetype, onfailure=None, precheck=None):
    '''return a decorator for populating internal merge tool table'''
    def register(func):
        canonical = ':' + name
        # prefix the docstring with the canonical tool name for the docs
        func.__doc__ = (pycompat.sysstr("``%s``\n" % canonical)
                        + func.__doc__.strip())
        # register under both ':name' and the legacy 'internal:name' form
        internals[canonical] = func
        internals['internal:' + name] = func
        internalsdoc[canonical] = func
        # attach tool metadata consumed by the merge machinery
        func.mergetype = mergetype
        func.onfailure = onfailure
        func.precheck = precheck
        return func
    return register
121 121
def _findtool(ui, tool):
    # internal tools (':merge' etc.) need no executable lookup
    if tool in internals:
        return tool
    return findexternaltool(ui, tool)
126 126
def findexternaltool(ui, tool):
    """Locate the executable of an external merge tool, or return None."""
    # try the Windows-registry based configuration first:
    # merge-tools.<tool>.regkey / .regkeyalt (+ .regname / .regappend)
    for keyopt in ("regkey", "regkeyalt"):
        regkey = _toolstr(ui, tool, keyopt)
        if not regkey:
            continue
        found = util.lookupreg(regkey, _toolstr(ui, tool, "regname"))
        if not found:
            continue
        found = util.findexe(found + _toolstr(ui, tool, "regappend"))
        if found:
            return found
    # otherwise search PATH for the configured executable (default: the
    # tool's own name)
    exe = _toolstr(ui, tool, "executable", tool)
    return util.findexe(util.expandpath(exe))
139 139
def _picktool(repo, ui, path, binary, symlink, changedelete):
    """Choose a merge tool for *path*.

    Returns a (tool, toolpath) pair; toolpath is shell-quoted, or None for
    internal tools and prompts. Candidates are considered in the order they
    appear below: ui.forcemerge, HGMERGE, merge-patterns, prioritized
    merge-tools (with ui.merge promoted), 'hgmerge', then an internal
    fallback.
    """
    def supportscd(tool):
        # only nomerge-type internal tools can resolve change/delete conflicts
        return tool in internals and internals[tool].mergetype == nomerge

    def check(tool, pat, symlink, binary, changedelete):
        # validate that 'tool' exists and can handle this file's properties
        tmsg = tool
        if pat:
            tmsg = _("%s (for pattern %s)") % (tool, pat)
        if not _findtool(ui, tool):
            if pat: # explicitly requested tool deserves a warning
                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
            else: # configured but non-existing tools are more silent
                ui.note(_("couldn't find merge tool %s\n") % tmsg)
        elif symlink and not _toolbool(ui, tool, "symlink"):
            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
        elif binary and not _toolbool(ui, tool, "binary"):
            ui.warn(_("tool %s can't handle binary\n") % tmsg)
        elif changedelete and not supportscd(tool):
            # the nomerge tools are the only tools that support change/delete
            # conflicts
            pass
        elif not util.gui() and _toolbool(ui, tool, "gui"):
            ui.warn(_("tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False

    # internal config: ui.forcemerge
    # forcemerge comes from command line arguments, highest priority
    force = ui.config('ui', 'forcemerge')
    if force:
        toolpath = _findtool(ui, force)
        if changedelete and not supportscd(toolpath):
            return ":prompt", None
        else:
            if toolpath:
                return (force, util.shellquote(toolpath))
            else:
                # mimic HGMERGE if given tool not found
                return (force, force)

    # HGMERGE takes next precedence
    hgmerge = encoding.environ.get("HGMERGE")
    if hgmerge:
        if changedelete and not supportscd(hgmerge):
            return ":prompt", None
        else:
            return (hgmerge, hgmerge)

    # then patterns
    for pat, tool in ui.configitems("merge-patterns"):
        mf = match.match(repo.root, '', [pat])
        if mf(path) and check(tool, pat, symlink, False, changedelete):
            toolpath = _findtool(ui, tool)
            return (tool, util.shellquote(toolpath))

    # then merge tools
    tools = {}
    disabled = set()
    for k, v in ui.configitems("merge-tools"):
        t = k.split('.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, "priority", "0"))
        if _toolbool(ui, t, "disabled", False):
            disabled.add(t)
    names = tools.keys()
    # sort by descending priority (negated so tuple sort is ascending)
    tools = sorted([(-p, tool) for tool, p in tools.items()
                    if tool not in disabled])
    uimerge = ui.config("ui", "merge")
    if uimerge:
        # external tools defined in uimerge won't be able to handle
        # change/delete conflicts
        if uimerge not in names and not changedelete:
            return (uimerge, uimerge)
        tools.insert(0, (None, uimerge)) # highest priority
    tools.append((None, "hgmerge")) # the old default, if found
    for p, t in tools:
        if check(t, None, symlink, binary, changedelete):
            toolpath = _findtool(ui, t)
            return (t, util.shellquote(toolpath))

    # internal merge or prompt as last resort
    if symlink or binary or changedelete:
        if not changedelete and len(tools):
            # any tool is rejected by capability for symlink or binary
            ui.warn(_("no tool found to merge %s\n") % path)
        return ":prompt", None
    return ":merge", None
228 228
229 229 def _eoltype(data):
230 230 "Guess the EOL type of a file"
231 231 if '\0' in data: # binary
232 232 return None
233 233 if '\r\n' in data: # Windows
234 234 return '\r\n'
235 235 if '\r' in data: # Old Mac
236 236 return '\r'
237 237 if '\n' in data: # UNIX
238 238 return '\n'
239 239 return None # unknown
240 240
def _matcheol(file, origfile):
    "Convert EOL markers in a file to match origfile"
    # target style comes from the original (pre-merge) file
    tostyle = _eoltype(util.readfile(origfile))
    if not tostyle:
        return
    data = util.readfile(file)
    fromstyle = _eoltype(data)
    if not fromstyle:
        return
    newdata = data.replace(fromstyle, tostyle)
    if newdata != data:  # rewrite only when something actually changed
        util.writefile(file, newdata)
251 251
@internaltool('prompt', nomerge)
def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """Asks the user which of the local `p1()` or the other `p2()` version to
    keep as the merged version."""
    ui = repo.ui
    fd = fcd.path()

    # prompts carries the optional " [label]" decorations plus the filename
    prompts = partextras(labels)
    prompts['fd'] = fd
    try:
        if fco.isabsent():
            # change/delete conflict: local changed, other deleted
            index = ui.promptchoice(
                _localchangedotherdeletedmsg % prompts, 2)
            choice = ['local', 'other', 'unresolved'][index]
        elif fcd.isabsent():
            # change/delete conflict: other changed, local deleted
            index = ui.promptchoice(
                _otherchangedlocaldeletedmsg % prompts, 2)
            choice = ['other', 'local', 'unresolved'][index]
        else:
            # ordinary both-sides-modified conflict
            index = ui.promptchoice(
                _("keep (l)ocal%(l)s, take (o)ther%(o)s, or leave (u)nresolved"
                  " for %(fd)s?"
                  "$$ &Local $$ &Other $$ &Unresolved") % prompts, 2)
            choice = ['local', 'other', 'unresolved'][index]

        # delegate to the corresponding nomerge tool
        if choice == 'other':
            return _iother(repo, mynode, orig, fcd, fco, fca, toolconf,
                           labels)
        elif choice == 'local':
            return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf,
                           labels)
        elif choice == 'unresolved':
            return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
                          labels)
    except error.ResponseExpected:
        # no interactive answer available; leave the file unresolved
        ui.write("\n")
        return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
                      labels)
290 290
@internaltool('local', nomerge)
def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """Uses the local `p1()` version of files as the merged version."""
    # (merge return code, deleted): keeping local means the file is deleted
    # from disk exactly when the local side was absent
    return 0, fcd.isabsent()
295 295
@internaltool('other', nomerge)
def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """Uses the other `p2()` version of files as the merged version."""
    if fco.isabsent():
        # local changed, remote deleted -- 'deleted' picked
        repo.wvfs.unlinkpath(fcd.path())
        deleted = True
    else:
        # overwrite the working copy with the other side's content/flags
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
        deleted = False
    return 0, deleted
307 307
@internaltool('fail', nomerge)
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts."""
    # for change/delete conflicts write out the changed version, then fail
    if fcd.isabsent():
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 1, False
318 318
def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None):
    """Try a trivial simplemerge pass before invoking the real merge tool.

    Returns 0 when premerge fully resolved the file, 1 when the caller
    should continue with the configured tool."""
    tool, toolpath, binary, symlink = toolconf
    # premerge is content-based; symlinks and change/delete conflicts
    # cannot be premerged
    if symlink or fcd.isabsent() or fco.isabsent():
        return 1
    a, b, c, back = files

    ui = repo.ui

    # premerge values that deliberately leave conflict markers in the file
    validkeep = ['keep', 'keep-merge3']

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, "premerge", not binary)
    except error.ConfigError:
        # not a boolean: must be one of the recognized "keep" styles
        premerge = _toolstr(ui, tool, "premerge").lower()
        if premerge not in validkeep:
            _valid = ', '.join(["'" + v + "'" for v in validkeep])
            raise error.ConfigError(_("%s.premerge not valid "
                                      "('%s' is neither boolean nor %s)") %
                                    (tool, premerge, _valid))

    if premerge:
        if premerge == 'keep-merge3':
            # three-section markers need a third 'base' label
            if not labels:
                labels = _defaultconflictlabels
            if len(labels) < 3:
                labels.append('base')
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
        if not r:
            ui.debug(" premerge successful\n")
            return 0
        if premerge not in validkeep:
            util.copyfile(back, a)  # restore from backup and try again
    return 1  # continue merging
353 353
def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
    """Precheck shared by the internal simplemerge-based tools: refuse
    symlinks and change/delete conflicts."""
    tool, toolpath, binary, symlink = toolconf
    ui = repo.ui
    if symlink:
        ui.warn(_('warning: internal %s cannot merge symlinks '
                  'for %s\n') % (tool, fcd.path()))
        return False
    if fcd.isabsent() or fco.isabsent():
        ui.warn(_('warning: internal %s cannot merge change/delete '
                  'conflict for %s\n') % (tool, fcd.path()))
        return False
    return True
365 365
def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge, unless mode equals 'union' which suppresses the markers."""
    a, b, c, back = files

    ui = repo.ui

    r = simplemerge.simplemerge(ui, a, b, c, label=labels, mode=mode)
    # (needcheck, merge return code, deleted)
    return True, r, False
378 378
@internaltool('union', fullmerge,
              _("warning: conflicts while merging %s! "
                "(edit, then use 'hg resolve --mark')\n"),
              precheck=_mergecheck)
def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will use both left and right sides for conflict regions.
    No markers are inserted."""
    return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
                  files, labels, 'union')
390 390
@internaltool('merge', fullmerge,
              _("warning: conflicts while merging %s! "
                "(edit, then use 'hg resolve --mark')\n"),
              precheck=_mergecheck)
def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge."""
    return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
                  files, labels, 'merge')
403 403
@internaltool('merge3', fullmerge,
              _("warning: conflicts while merging %s! "
                "(edit, then use 'hg resolve --mark')\n"),
              precheck=_mergecheck)
def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Marker will have three sections, one from each
    side of the merge and one for the base content."""
    # ensure a third 'base' label exists before delegating to :merge
    if not labels:
        labels = _defaultconflictlabels
    if len(labels) < 3:
        labels.append('base')
    return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
419 419
def _imergeauto(repo, mynode, orig, fcd, fco, fca, toolconf, files,
                labels=None, localorother=None):
    """
    Generic driver for _imergelocal and _imergeother
    """
    assert localorother is not None
    tool, toolpath, binary, symlink = toolconf
    a, b, c, back = files
    # every conflict region is resolved in favor of localorother
    r = simplemerge.simplemerge(repo.ui, a, b, c, label=labels,
                                localorother=localorother)
    return True, r
431 431
@internaltool('merge-local', mergeonly, precheck=_mergecheck)
def _imergelocal(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the local `p1()` changes."""
    success, status = _imergeauto(localorother='local', *args, **kwargs)
    # append deleted=False to match the (complete, r, deleted) convention
    return success, status, False
439 439
@internaltool('merge-other', mergeonly, precheck=_mergecheck)
def _imergeother(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the other `p2()` changes."""
    success, status = _imergeauto(localorother='other', *args, **kwargs)
    # append deleted=False to match the (complete, r, deleted) convention
    return success, status, False
447 447
@internaltool('tagmerge', mergeonly,
              _("automatic tag merging of %s failed! "
                "(use 'hg resolve --tool :merge' or another merge "
                "tool of your choice)\n"))
def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal tag merge algorithm (experimental).
    """
    success, status = tagmerge.merge(repo, fcd, fco, fca)
    return success, status, False
458 458
@internaltool('dump', fullmerge)
def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Creates three versions of the files to merge, containing the
    contents of local, other and base. These files can then be used to
    perform a merge manually. If the file to be merged is named
    ``a.txt``, these files will accordingly be named ``a.txt.local``,
    ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
    same directory as ``a.txt``.

    This implies premerge. Therefore, files aren't dumped, if premerge
    runs successfully. Use :forcedump to forcibly write files out.
    """
    a, b, c, back = files

    fd = fcd.path()

    # .local comes from the working file; .other/.base are written through
    # repo.wwrite with the corresponding contexts' data and flags
    util.copyfile(a, a + ".local")
    repo.wwrite(fd + ".other", fco.data(), fco.flags())
    repo.wwrite(fd + ".base", fca.data(), fca.flags())
    # (needcheck=False, r=1, deleted=False): the file stays unresolved
    return False, 1, False
480 480
@internaltool('forcedump', mergeonly)
def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
               labels=None):
    """
    Creates three versions of the files as same as :dump, but omits premerge.
    """
    # mergeonly registration skips premerge; the dump itself is identical
    return _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
                  labels=labels)
489 489
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """Run an external merge tool as a shell command and return its result."""
    tool, toolpath, binary, symlink = toolconf
    if fcd.isabsent() or fco.isabsent():
        # external tools cannot handle change/delete conflicts
        repo.ui.warn(_('warning: %s cannot merge change/delete conflict '
                       'for %s\n') % (tool, fcd.path()))
        return False, 1, None
    a, b, c, back = files
    out = ""
    # environment exposed to the external tool
    env = {'HG_FILE': fcd.path(),
           'HG_MY_NODE': short(mynode),
           'HG_OTHER_NODE': str(fco.changectx()),
           'HG_BASE_NODE': str(fca.changectx()),
           'HG_MY_ISLINK': 'l' in fcd.flags(),
           'HG_OTHER_ISLINK': 'l' in fco.flags(),
           'HG_BASE_ISLINK': 'l' in fca.flags(),
           }

    ui = repo.ui

    args = _toolstr(ui, tool, "args", '$local $base $other')
    if "$output" in args:
        out, a = a, back  # read input from backup, write to original
    replace = {'local': a, 'base': b, 'other': c, 'output': out}
    # substitute $local/$base/$other/$output with shell-quoted local paths
    args = util.interpolate(r'\$', replace, args,
                            lambda s: util.shellquote(util.localpath(s)))
    cmd = toolpath + ' ' + args
    if _toolbool(ui, tool, "gui"):
        repo.ui.status(_('running merge tool %s for file %s\n') %
                       (tool, fcd.path()))
    repo.ui.debug('launching merge tool: %s\n' % cmd)
    r = ui.system(cmd, cwd=repo.root, environ=env, blockedtag='mergetool')
    repo.ui.debug('merge tool returned: %s\n' % r)
    return True, r, False
523 523
def _formatconflictmarker(repo, ctx, template, label, pad):
    """Applies the given template to the ctx, prefixed by the label.

    Pad is the minimum width of the label prefix, so that multiple markers
    can have aligned templated parts.
    """
    if ctx.node() is None:
        # working-directory ctx has no node; describe its first parent
        ctx = ctx.p1()

    props = templatekw.keywords.copy()
    props['templ'] = template
    props['ctx'] = ctx
    props['repo'] = repo
    templateresult = template.render(props)

    label = ('%s:' % label).ljust(pad + 1)
    mark = '%s %s' % (label, templateresult)

    if mark:
        mark = mark.splitlines()[0]  # split for safety

    # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
    return util.ellipsis(mark, 80 - 8)
547 547
# template used to annotate each side of a conflict marker; shows the short
# node, then tag/bookmark/branch names (suppressing "tip" and "default"),
# then "- user: first line of the description"
_defaultconflictmarker = ('{node|short} '
                          '{ifeq(tags, "tip", "", '
                          'ifeq(tags, "", "", "{tags} "))}'
                          '{if(bookmarks, "{bookmarks} ")}'
                          '{ifeq(branch, "default", "", "{branch} ")}'
                          '- {author|user}: {desc|firstline}')

# default names for the two sides of a merge
_defaultconflictlabels = ['local', 'other']
556 556
def _formatlabels(repo, fcd, fco, fca, labels):
    """Formats the given labels using the conflict marker template.

    Returns a list of formatted labels.
    """
    cd = fcd.changectx()
    co = fco.changectx()
    ca = fca.changectx()

    ui = repo.ui
    template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
    template = templater.unquotestring(template)
    tmpl = formatter.maketemplater(ui, template)

    # pad to the longest label so the templated parts line up
    pad = max(len(l) for l in labels)

    newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
                 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
    if len(labels) > 2:
        # optional third label describes the merge base
        newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
    return newlabels
578 578
def partextras(labels):
    """Return a dictionary of extra labels for use in prompts to the user

    Intended use is in strings of the form "(l)ocal%(l)s".
    """
    # without labels the decorations collapse to empty strings
    if labels is None:
        return {"l": "", "o": ""}
    local, other = labels[0], labels[1]
    return {"l": " [%s]" % local, "o": " [%s]" % other}
594 594
def _filemerge(premerge, repo, mynode, orig, fcd, fco, fca, labels=None):
    """perform a 3-way merge in the working directory

    premerge = whether this is a premerge
    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file

    Returns whether the merge is complete, the return value of the merge, and
    a boolean indicating whether the file was deleted from disk."""

    def temp(prefix, ctx):
        # write ctx's (filtered) data to a unique temp file, return its path
        fullbase, ext = os.path.splitext(ctx.path())
        pre = "%s~%s." % (os.path.basename(fullbase), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(data)
        f.close()
        return name

    if not fco.cmp(fcd):  # files identical?
        return True, None, False

    ui = repo.ui
    fd = fcd.path()
    binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
    symlink = 'l' in fcd.flags() + fco.flags()
    changedelete = fcd.isabsent() or fco.isabsent()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
    if tool in internals and tool.startswith('internal:'):
        # normalize to new-style names (':merge' etc)
        tool = tool[len('internal'):]
    ui.debug("picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
             % (tool, fd, pycompat.bytestr(binary), pycompat.bytestr(symlink),
                pycompat.bytestr(changedelete)))

    if tool in internals:
        func = internals[tool]
        mergetype = func.mergetype
        onfailure = func.onfailure
        precheck = func.precheck
    else:
        # unknown tools are executed as external commands via _xmerge
        func = _xmerge
        mergetype = fullmerge
        onfailure = _("merging %s failed!\n")
        precheck = None

    toolconf = tool, toolpath, binary, symlink

    if mergetype == nomerge:
        # nomerge tools resolve without file-content merging
        r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
        return True, r, deleted

    if premerge:
        if orig != fco.path():
            ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
        else:
            ui.status(_("merging %s\n") % fd)

    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))

    if precheck and not precheck(repo, mynode, orig, fcd, fco, fca,
                                 toolconf):
        if onfailure:
            ui.warn(onfailure % fd)
        return True, 1, False

    # a = working file, b = base temp file, c = other temp file,
    # back = backup of the working file (None for change/delete conflicts)
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    if not fcd.isabsent():
        back = scmutil.origpath(ui, repo, a)
        if premerge:
            util.copyfile(a, back)
    else:
        back = None
    files = (a, b, c, back)

    r = 1
    try:
        markerstyle = ui.config('ui', 'mergemarkers', 'basic')
        if not labels:
            labels = _defaultconflictlabels
        if markerstyle != 'basic':
            labels = _formatlabels(repo, fcd, fco, fca, labels)

        if premerge and mergetype == fullmerge:
            r = _premerge(repo, fcd, fco, fca, toolconf, files, labels=labels)
            # complete if premerge successful (r is 0)
            return not r, r, False

        needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
                                     toolconf, files, labels=labels)

        if needcheck:
            r = _check(r, ui, tool, fcd, files)

        if r:
            if onfailure:
                ui.warn(onfailure % fd)

        return True, r, deleted
    finally:
        # remove temp files; keep the backup around while the merge failed
        if not r and back is not None:
            util.unlink(back)
        util.unlink(b)
        util.unlink(c)
705 705
def _check(r, ui, tool, fcd, files):
    """Apply the merge-tools.<tool>.check* post-merge validations and
    return the possibly-updated merge result code."""
    fd = fcd.path()
    a, b, c, back = files

    if not r and (_toolbool(ui, tool, "checkconflicts") or
                  'conflicts' in _toollist(ui, tool, "check")):
        # leftover conflict markers mean the merge did not really succeed
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1

    checked = False
    if 'prompt' in _toollist(ui, tool, "check"):
        checked = True
        if ui.promptchoice(_("was merge of '%s' successful (yn)?"
                             "$$ &Yes $$ &No") % fd, 1):
            r = 1

    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
                                  'changed' in
                                  _toollist(ui, tool, "check")):
        # an output identical to the backup often means the tool was
        # closed without saving; ask the user to confirm
        if back is not None and filecmp.cmp(a, back):
            if ui.promptchoice(_(" output file %s appears unchanged\n"
                                 "was merge successful (yn)?"
                                 "$$ &Yes $$ &No") % fd, 1):
                r = 1

    if back is not None and _toolbool(ui, tool, "fixeol"):
        # rewrite the result's EOLs to match the pre-merge file
        _matcheol(a, back)

    return r
736 736
def premerge(repo, mynode, orig, fcd, fco, fca, labels=None):
    # public entry point for the premerge pass of _filemerge
    return _filemerge(True, repo, mynode, orig, fcd, fco, fca, labels=labels)
739 739
def filemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
    # public entry point for the full (non-premerge) pass of _filemerge
    return _filemerge(False, repo, mynode, orig, fcd, fco, fca, labels=labels)
742 742
# tell hggettext to extract docstrings from these functions:
# (internals maps tool names to the @internaltool-decorated functions)
i18nfunctions = internals.values()
@@ -1,486 +1,489 b''
1 1 # formatter.py - generic output formatting for mercurial
2 2 #
3 3 # Copyright 2012 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Generic output formatting for Mercurial
9 9
10 10 The formatter provides API to show data in various ways. The following
11 11 functions should be used in place of ui.write():
12 12
13 13 - fm.write() for unconditional output
14 14 - fm.condwrite() to show some extra data conditionally in plain output
15 15 - fm.context() to provide changectx to template output
16 16 - fm.data() to provide extra data to JSON or template output
17 17 - fm.plain() to show raw text that isn't provided to JSON or template output
18 18
19 19 To show structured data (e.g. date tuples, dicts, lists), apply fm.format*()
20 20 beforehand so the data is converted to the appropriate data type. Use
21 21 fm.isplain() if you need to convert or format data conditionally which isn't
22 22 supported by the formatter API.
23 23
24 24 To build nested structure (i.e. a list of dicts), use fm.nested().
25 25
26 26 See also https://www.mercurial-scm.org/wiki/GenericTemplatingPlan
27 27
28 28 fm.condwrite() vs 'if cond:':
29 29
30 30 In most cases, use fm.condwrite() so users can selectively show the data
31 31 in template output. If it's costly to build data, use plain 'if cond:' with
32 32 fm.write().
33 33
34 34 fm.nested() vs fm.formatdict() (or fm.formatlist()):
35 35
36 36 fm.nested() should be used to form a tree structure (a list of dicts of
37 37 lists of dicts...) which can be accessed through template keywords, e.g.
38 38 "{foo % "{bar % {...}} {baz % {...}}"}". On the other hand, fm.formatdict()
39 39 exports a dict-type object to template, which can be accessed by e.g.
40 40 "{get(foo, key)}" function.
41 41
42 42 Doctest helper:
43 43
44 44 >>> def show(fn, verbose=False, **opts):
45 45 ... import sys
46 46 ... from . import ui as uimod
47 47 ... ui = uimod.ui()
48 48 ... ui.fout = sys.stdout # redirect to doctest
49 49 ... ui.verbose = verbose
50 50 ... return fn(ui, ui.formatter(fn.__name__, opts))
51 51
52 52 Basic example:
53 53
54 54 >>> def files(ui, fm):
55 55 ... files = [('foo', 123, (0, 0)), ('bar', 456, (1, 0))]
56 56 ... for f in files:
57 57 ... fm.startitem()
58 58 ... fm.write('path', '%s', f[0])
59 59 ... fm.condwrite(ui.verbose, 'date', ' %s',
60 60 ... fm.formatdate(f[2], '%Y-%m-%d %H:%M:%S'))
61 61 ... fm.data(size=f[1])
62 62 ... fm.plain('\\n')
63 63 ... fm.end()
64 64 >>> show(files)
65 65 foo
66 66 bar
67 67 >>> show(files, verbose=True)
68 68 foo 1970-01-01 00:00:00
69 69 bar 1970-01-01 00:00:01
70 70 >>> show(files, template='json')
71 71 [
72 72 {
73 73 "date": [0, 0],
74 74 "path": "foo",
75 75 "size": 123
76 76 },
77 77 {
78 78 "date": [1, 0],
79 79 "path": "bar",
80 80 "size": 456
81 81 }
82 82 ]
83 83 >>> show(files, template='path: {path}\\ndate: {date|rfc3339date}\\n')
84 84 path: foo
85 85 date: 1970-01-01T00:00:00+00:00
86 86 path: bar
87 87 date: 1970-01-01T00:00:01+00:00
88 88
89 89 Nested example:
90 90
91 91 >>> def subrepos(ui, fm):
92 92 ... fm.startitem()
93 93 ... fm.write('repo', '[%s]\\n', 'baz')
94 94 ... files(ui, fm.nested('files'))
95 95 ... fm.end()
96 96 >>> show(subrepos)
97 97 [baz]
98 98 foo
99 99 bar
100 100 >>> show(subrepos, template='{repo}: {join(files % "{path}", ", ")}\\n')
101 101 baz: foo, bar
102 102 """
103 103
104 104 from __future__ import absolute_import
105 105
106 106 import collections
107 107 import contextlib
108 108 import itertools
109 109 import os
110 110
111 111 from .i18n import _
112 112 from .node import (
113 113 hex,
114 114 short,
115 115 )
116 116
117 117 from . import (
118 118 error,
119 119 pycompat,
120 120 templatefilters,
121 121 templatekw,
122 122 templater,
123 123 util,
124 124 )
125 125
126 126 pickle = util.pickle
127 127
class _nullconverter(object):
    '''convert non-primitive data types to be processed by formatter'''
    @staticmethod
    def formatdate(date, fmt):
        '''convert date tuple to appropriate format'''
        # pass through untouched; structured backends keep the raw tuple
        return date
    @staticmethod
    def formatdict(data, key, value, fmt, sep):
        '''convert dict or key-value pairs to appropriate dict format'''
        # use plain dict instead of util.sortdict so that data can be
        # serialized as a builtin dict in pickle output
        return dict(data)
    @staticmethod
    def formatlist(data, name, fmt, sep):
        '''convert iterable to appropriate list format'''
        return list(data)
144 144
class baseformatter(object):
    '''machinery shared by every formatter backend; subclasses override
    _showitem() (and possibly write/plain) to produce actual output'''
    def __init__(self, ui, topic, opts, converter):
        self._ui = ui
        self._topic = topic
        self._style = opts.get("style")
        self._template = opts.get("template")
        self._converter = converter
        self._item = None  # current item dict; None until startitem()
        # function to convert node to string suitable for this output
        self.hexfunc = hex
    def __enter__(self):
        return self
    def __exit__(self, exctype, excvalue, traceback):
        # flush pending output only on a clean exit
        if exctype is None:
            self.end()
    def _showitem(self):
        '''show a formatted item once all data is collected'''
        pass
    def startitem(self):
        '''begin an item in the format list'''
        # emit the previous item before starting a new one
        if self._item is not None:
            self._showitem()
        self._item = {}
    def formatdate(self, date, fmt='%a %b %d %H:%M:%S %Y %1%2'):
        '''convert date tuple to appropriate format'''
        return self._converter.formatdate(date, fmt)
    def formatdict(self, data, key='key', value='value', fmt='%s=%s', sep=' '):
        '''convert dict or key-value pairs to appropriate dict format'''
        return self._converter.formatdict(data, key, value, fmt, sep)
    def formatlist(self, data, name, fmt='%s', sep=' '):
        '''convert iterable to appropriate list format'''
        # name is mandatory argument for now, but it could be optional if
        # we have default template keyword, e.g. {item}
        return self._converter.formatlist(data, name, fmt, sep)
    def context(self, **ctxs):
        '''insert context objects to be used to render template keywords'''
        pass
    def data(self, **data):
        '''insert data into item that's not shown in default output'''
        data = pycompat.byteskwargs(data)
        self._item.update(data)
    def write(self, fields, deftext, *fielddata, **opts):
        '''do default text output while assigning data to item'''
        fieldkeys = fields.split()
        assert len(fieldkeys) == len(fielddata)
        self._item.update(zip(fieldkeys, fielddata))
    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
        '''do conditional write (primarily for plain formatter)'''
        # non-plain backends always record the data regardless of cond
        fieldkeys = fields.split()
        assert len(fieldkeys) == len(fielddata)
        self._item.update(zip(fieldkeys, fielddata))
    def plain(self, text, **opts):
        '''show raw text for non-templated mode'''
        pass
    def isplain(self):
        '''check for plain formatter usage'''
        return False
    def nested(self, field):
        '''sub formatter to store nested data in the specified field'''
        self._item[field] = data = []
        return _nestedformatter(self._ui, self._converter, data)
    def end(self):
        '''end output for the formatter'''
        # flush the last pending item
        if self._item is not None:
            self._showitem()
210 210
def nullformatter(ui, topic):
    '''formatter that prints nothing'''
    # baseformatter's output hooks are all no-ops, so data is discarded
    return baseformatter(ui, topic, opts={}, converter=_nullconverter)
214 214
class _nestedformatter(baseformatter):
    '''build sub items and store them in the parent formatter'''
    def __init__(self, ui, converter, data):
        baseformatter.__init__(self, ui, topic='', opts={}, converter=converter)
        self._data = data  # the list owned by the parent formatter's item
    def _showitem(self):
        self._data.append(self._item)
222 222
223 223 def _iteritems(data):
224 224 '''iterate key-value pairs in stable order'''
225 225 if isinstance(data, dict):
226 226 return sorted(data.iteritems())
227 227 return data
228 228
class _plainconverter(object):
    '''convert non-primitive data types to text'''
    @staticmethod
    def formatdate(date, fmt):
        '''stringify date tuple in the given format'''
        return util.datestr(date, fmt)
    @staticmethod
    def formatdict(data, key, value, fmt, sep):
        '''stringify key-value pairs separated by sep'''
        # each pair rendered with fmt (e.g. '%s=%s'), joined by sep
        return sep.join(fmt % (k, v) for k, v in _iteritems(data))
    @staticmethod
    def formatlist(data, name, fmt, sep):
        '''stringify iterable separated by sep'''
        return sep.join(fmt % e for e in data)
243 243
class plainformatter(baseformatter):
    '''the default text output scheme'''
    def __init__(self, ui, out, topic, opts):
        baseformatter.__init__(self, ui, topic, opts, _plainconverter)
        # full hashes in debug mode, short hashes otherwise
        if ui.debugflag:
            self.hexfunc = hex
        else:
            self.hexfunc = short
        # write through ui when it is the sink, so ui's label/buffer
        # machinery (the **opts) still applies
        if ui is out:
            self._write = ui.write
        else:
            self._write = lambda s, **opts: out.write(s)
    def startitem(self):
        pass
    def data(self, **data):
        # extra data is never shown in plain output
        pass
    def write(self, fields, deftext, *fielddata, **opts):
        self._write(deftext % fielddata, **opts)
    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
        '''do conditional write'''
        if cond:
            self._write(deftext % fielddata, **opts)
    def plain(self, text, **opts):
        self._write(text, **opts)
    def isplain(self):
        return True
    def nested(self, field):
        # nested data will be directly written to ui
        return self
    def end(self):
        pass
275 275
class debugformatter(baseformatter):
    '''renders items as a Python-repr list: "topic = [ {...}, ... ]"'''
    def __init__(self, ui, out, topic, opts):
        baseformatter.__init__(self, ui, topic, opts, _nullconverter)
        self._out = out
        self._out.write("%s = [\n" % self._topic)
    def _showitem(self):
        self._out.write(" " + repr(self._item) + ",\n")
    def end(self):
        baseformatter.end(self)  # flush the last pending item
        self._out.write("]\n")
286 286
class pickleformatter(baseformatter):
    '''accumulates items and writes them as one pickled list at end()'''
    def __init__(self, ui, out, topic, opts):
        baseformatter.__init__(self, ui, topic, opts, _nullconverter)
        self._out = out
        self._data = []
    def _showitem(self):
        self._data.append(self._item)
    def end(self):
        baseformatter.end(self)
        self._out.write(pickle.dumps(self._data))
297 297
class jsonformatter(baseformatter):
    '''streams items as a JSON array of objects'''
    def __init__(self, ui, out, topic, opts):
        baseformatter.__init__(self, ui, topic, opts, _nullconverter)
        self._out = out
        self._out.write("[")
        self._first = True  # suppresses the comma before the first item
    def _showitem(self):
        if self._first:
            self._first = False
        else:
            self._out.write(",")

        self._out.write("\n {\n")
        first = True
        # keys are sorted so the output is deterministic
        for k, v in sorted(self._item.items()):
            if first:
                first = False
            else:
                self._out.write(",\n")
            u = templatefilters.json(v, paranoid=False)
            self._out.write(' "%s": %s' % (k, u))
        self._out.write("\n }")
    def end(self):
        baseformatter.end(self)
        self._out.write("\n]\n")
323 323
class _templateconverter(object):
    '''convert non-primitive data types to be processed by templater'''
    @staticmethod
    def formatdate(date, fmt):
        '''return date tuple'''
        return date
    @staticmethod
    def formatdict(data, key, value, fmt, sep):
        '''build object that can be evaluated as either plain string or dict'''
        data = util.sortdict(_iteritems(data))
        def gen():
            # lazy plain-string rendering, produced only when stringified
            yield _plainconverter.formatdict(data, key, value, fmt, sep)
        return templatekw.hybriddict(data, key=key, value=value, fmt=fmt,
                                     gen=gen())
    @staticmethod
    def formatlist(data, name, fmt, sep):
        '''build object that can be evaluated as either plain string or list'''
        data = list(data)
        def gen():
            yield _plainconverter.formatlist(data, name, fmt, sep)
        return templatekw.hybridlist(data, name=name, fmt=fmt, gen=gen())
345 345
class templateformatter(baseformatter):
    '''formatter that renders each item through a user-specified template'''
    def __init__(self, ui, out, topic, opts):
        baseformatter.__init__(self, ui, topic, opts, _templateconverter)
        self._out = out
        spec = lookuptemplate(ui, topic, opts.get('template', ''))
        # name under which the template is stored in the templater cache
        self._tref = spec.ref
        self._t = loadtemplater(ui, spec, cache=templatekw.defaulttempl)
        self._counter = itertools.count()
        self._cache = {} # for templatekw/funcs to store reusable data
    def context(self, **ctxs):
        '''insert context objects to be used to render template keywords'''
        assert all(k == 'ctx' for k in ctxs)
        self._item.update(ctxs)
    def _showitem(self):
        # TODO: add support for filectx. probably each template keyword or
        # function will have to declare dependent resources. e.g.
        # @templatekeyword(..., requires=('ctx',))
        props = {}
        if 'ctx' in self._item:
            props.update(templatekw.keywords)
        props['index'] = next(self._counter)
        # explicitly-defined fields precede templatekw
        props.update(self._item)
        if 'ctx' in self._item:
            # but template resources must be always available
            props['templ'] = self._t
            props['repo'] = props['ctx'].repo()
            props['revcache'] = {}
        g = self._t(self._tref, ui=self._ui, cache=self._cache, **props)
        self._out.write(templater.stringify(g))
376 376
# (ref, tmpl, mapfile): 'ref' is the name the template is registered under;
# at most one of 'tmpl' (literal template) and 'mapfile' (style map path)
# is set (see lookuptemplate/loadtemplater below)
templatespec = collections.namedtuple(r'templatespec',
                                      r'ref tmpl mapfile')
379 379
def lookuptemplate(ui, topic, tmpl):
    """Find the template matching the given -T/--template spec 'tmpl'

    'tmpl' can be any of the following:

    - a literal template (e.g. '{rev}')
    - a map-file name or path (e.g. 'changelog')
    - a reference to [templates] in config file
    - a path to raw template file

    A map file defines a stand-alone template environment. If a map file
    selected, all templates defined in the file will be loaded, and the
    template matching the given topic will be rendered. No aliases will be
    loaded from user config.

    Returns a templatespec; raises error.Abort for the special name 'list'.
    """

    # looks like a literal template?
    if '{' in tmpl:
        return templatespec(topic, tmpl, None)

    # perhaps a stock style?
    if not os.path.split(tmpl)[0]:
        mapname = (templater.templatepath('map-cmdline.' + tmpl)
                   or templater.templatepath(tmpl))
        if mapname and os.path.isfile(mapname):
            return templatespec(topic, None, mapname)

    # perhaps it's a reference to [templates]
    t = ui.config('templates', tmpl)
    if t:
        return templatespec(topic, templater.unquotestring(t), None)

    if tmpl == 'list':
        ui.write(_("available styles: %s\n") % templater.stylelist())
        raise error.Abort(_("specify a template"))

    # perhaps it's a path to a map or a template
    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
        # is it a mapfile for a style?
        if os.path.basename(tmpl).startswith("map-"):
            return templatespec(topic, None, os.path.realpath(tmpl))
        with util.posixfile(tmpl, 'rb') as f:
            tmpl = f.read()
        return templatespec(topic, tmpl, None)

    # constant string?
    return templatespec(topic, tmpl, None)
427 427
def loadtemplater(ui, spec, cache=None):
    """Create a templater from either a literal template or loading from
    a map file

    'spec' is a templatespec; at most one of spec.tmpl/spec.mapfile is set.
    """
    # diff residue resolved: keep the post-change call to _maketemplater,
    # which registers the literal template under spec.ref
    assert not (spec.tmpl and spec.mapfile)
    if spec.mapfile:
        return templater.templater.frommapfile(spec.mapfile, cache=cache)
    return _maketemplater(ui, spec.ref, spec.tmpl, cache=cache)
435 435
def maketemplater(ui, tmpl, cache=None):
    """Create a templater from a string template 'tmpl'

    The template is registered under the unnamed ('') reference, giving a
    simple interface for callers that render a single anonymous template.
    """
    return _maketemplater(ui, '', tmpl, cache=cache)

def _maketemplater(ui, topic, tmpl, cache=None):
    # diff residue resolved: old/new signatures were interleaved; this is
    # the post-change pair (public maketemplater + private _maketemplater)
    aliases = ui.configitems('templatealias')
    t = templater.templater(cache=cache, aliases=aliases)
    if tmpl:
        t.cache[topic] = tmpl
    return t
443 446
def formatter(ui, out, topic, opts):
    '''pick a formatter implementation from -T/--template and ui config'''
    template = opts.get("template", "")
    # special template names select a fixed machine-readable formatter
    special = {
        "json": jsonformatter,
        "pickle": pickleformatter,
        "debug": debugformatter,
    }
    if template in special:
        return special[template](ui, out, topic, opts)
    if template != "":
        return templateformatter(ui, out, topic, opts)
    # developer config: ui.formatdebug
    if ui.configbool('ui', 'formatdebug'):
        return debugformatter(ui, out, topic, opts)
    # deprecated config: ui.formatjson
    if ui.configbool('ui', 'formatjson'):
        return jsonformatter(ui, out, topic, opts)
    return plainformatter(ui, out, topic, opts)
461 464
@contextlib.contextmanager
def openformatter(ui, filename, topic, opts):
    """Create a formatter that writes outputs to the specified file

    Must be invoked using the 'with' statement.
    """
    with util.posixfile(filename, 'wb') as out, \
            formatter(ui, out, topic, opts) as fm:
        yield fm
471 474
@contextlib.contextmanager
def _neverending(fm):
    # pass-through context manager: yields fm without calling fm.end(),
    # used by maybereopen() when no file-backed formatter is created
    yield fm
475 478
def maybereopen(fm, filename, opts):
    """Create a formatter backed by file if filename specified, else return
    the given formatter

    Must be invoked using the 'with' statement. This will never call fm.end()
    of the given formatter.
    """
    if not filename:
        return _neverending(fm)
    return openformatter(fm._ui, filename, fm._topic, opts)
@@ -1,1375 +1,1379 b''
1 1 # templater.py - template expansion for output
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import re
12 12 import types
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 color,
17 17 config,
18 18 encoding,
19 19 error,
20 20 minirst,
21 21 parser,
22 22 pycompat,
23 23 registrar,
24 24 revset as revsetmod,
25 25 revsetlang,
26 26 templatefilters,
27 27 templatekw,
28 28 util,
29 29 )
30 30
31 31 # template parsing
32 32
# operator table consumed by parser.parser() (see its use below)
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "%": (16, None, None, ("%", 16), None),
    "|": (15, None, None, ("|", 15), None),
    "*": (5, None, None, ("*", 5), None),
    "/": (5, None, None, ("/", 5), None),
    "+": (4, None, None, ("+", 4), None),
    "-": (4, None, ("negate", 19), ("-", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "integer": (0, "integer", None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "template": (0, "template", None, None, None),
    "end": (0, None, None, None, None),
}
51 51
def tokenize(program, start, end, term=None):
    """Parse a template expression into a stream of tokens, which must end
    with term if specified

    Yields (token-type, data, position) tuples; token types match the keys
    of the 'elements' table above.
    """
    pos = start
    program = pycompat.bytestr(program)
    while pos < end:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(=,)%|+-*/": # handle simple operators
            yield (c, None, pos)
        elif c in '"\'': # handle quoted templates
            s = pos + 1
            data, pos = _parsetemplate(program, s, end, c)
            yield ('template', data, s)
            pos -= 1
        elif c == 'r' and program[pos:pos + 2] in ("r'", 'r"'):
            # handle quoted strings
            c = program[pos + 1]
            s = pos = pos + 2
            while pos < end: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', program[s:pos], s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isdigit():
            s = pos
            while pos < end:
                d = program[pos]
                if not d.isdigit():
                    break
                pos += 1
            yield ('integer', program[s:pos], s)
            pos -= 1
        elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"')
              or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')):
            # handle escaped quoted strings for compatibility with 2.9.2-3.4,
            # where some of nested templates were preprocessed as strings and
            # then compiled. therefore, \"...\" was allowed. (issue4733)
            #
            # processing flow of _evalifliteral() at 5ab28a2e9962:
            # outer template string -> stringify() -> compiletemplate()
            # ------------------------ ------------ ------------------
            # {f("\\\\ {g(\"\\\"\")}"} \\ {g("\"")} [r'\\', {g("\"")}]
            #             ~~~~~~~~
            #             escaped quoted string
            if c == 'r':
                pos += 1
                token = 'string'
            else:
                token = 'template'
            quote = program[pos:pos + 2]
            s = pos = pos + 2
            while pos < end: # find closing escaped quote
                if program.startswith('\\\\\\', pos, end):
                    pos += 4 # skip over double escaped characters
                    continue
                if program.startswith(quote, pos, end):
                    # interpret as if it were a part of an outer string
                    data = parser.unescapestr(program[s:pos])
                    if token == 'template':
                        data = _parsetemplate(data, 0, len(data))[0]
                    yield (token, data, s)
                    pos += 1
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in '_':
            s = pos
            pos += 1
            while pos < end: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d == "_"):
                    break
                pos += 1
            sym = program[s:pos]
            yield ('symbol', sym, s)
            pos -= 1
        elif c == term:
            yield ('end', None, pos + 1)
            return
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    if term:
        raise error.ParseError(_("unterminated template expansion"), start)
    yield ('end', None, pos)
146 146
def _parsetemplate(tmpl, start, stop, quote=''):
    r"""Parse tmpl[start:stop] into a (parsed-node-list, end-position) pair

    >>> _parsetemplate('foo{bar}"baz', 0, 12)
    ([('string', 'foo'), ('symbol', 'bar'), ('string', '"baz')], 12)
    >>> _parsetemplate('foo{bar}"baz', 0, 12, quote='"')
    ([('string', 'foo'), ('symbol', 'bar')], 9)
    >>> _parsetemplate('foo"{bar}', 0, 9, quote='"')
    ([('string', 'foo')], 4)
    >>> _parsetemplate(r'foo\"bar"baz', 0, 12, quote='"')
    ([('string', 'foo"'), ('string', 'bar')], 9)
    >>> _parsetemplate(r'foo\\"bar', 0, 10, quote='"')
    ([('string', 'foo\\')], 6)
    """
    parsed = []
    sepchars = '{' + quote
    pos = start
    p = parser.parser(elements)
    while pos < stop:
        # find the next separator: either '{' or the closing quote
        n = min((tmpl.find(c, pos, stop) for c in sepchars),
                key=lambda n: (n < 0, n))
        if n < 0:
            # no separator left; the rest is a literal string
            parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
            pos = stop
            break
        c = tmpl[n]
        # count trailing backslashes to decide if the separator is escaped
        bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
        if bs % 2 == 1:
            # escaped (e.g. '\{', '\\\{', but not '\\{')
            parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
            pos = n + 1
            continue
        if n > pos:
            parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
        if c == quote:
            return parsed, n + 1

        parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
        parsed.append(parseres)

    if quote:
        raise error.ParseError(_("unterminated string"), start)
    return parsed, pos
189 189
190 190 def _unnesttemplatelist(tree):
191 191 """Expand list of templates to node tuple
192 192
193 193 >>> def f(tree):
194 194 ... print prettyformat(_unnesttemplatelist(tree))
195 195 >>> f(('template', []))
196 196 ('string', '')
197 197 >>> f(('template', [('string', 'foo')]))
198 198 ('string', 'foo')
199 199 >>> f(('template', [('string', 'foo'), ('symbol', 'rev')]))
200 200 (template
201 201 ('string', 'foo')
202 202 ('symbol', 'rev'))
203 203 >>> f(('template', [('symbol', 'rev')])) # template(rev) -> str
204 204 (template
205 205 ('symbol', 'rev'))
206 206 >>> f(('template', [('template', [('string', 'foo')])]))
207 207 ('string', 'foo')
208 208 """
209 209 if not isinstance(tree, tuple):
210 210 return tree
211 211 op = tree[0]
212 212 if op != 'template':
213 213 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
214 214
215 215 assert len(tree) == 2
216 216 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
217 217 if not xs:
218 218 return ('string', '') # empty template ""
219 219 elif len(xs) == 1 and xs[0][0] == 'string':
220 220 return xs[0] # fast path for string with no template fragment "x"
221 221 else:
222 222 return (op,) + xs
223 223
def parse(tmpl):
    """Parse template string into tree"""
    nodes, pos = _parsetemplate(tmpl, 0, len(tmpl))
    assert pos == len(tmpl), 'unquoted template should be consumed'
    # wrap in a single 'template' node, then collapse trivial nesting
    return _unnesttemplatelist(('template', nodes))
229 229
def _parseexpr(expr):
    """Parse a template expression into tree

    >>> _parseexpr('"foo"')
    ('string', 'foo')
    >>> _parseexpr('foo(bar)')
    ('func', ('symbol', 'foo'), ('symbol', 'bar'))
    >>> _parseexpr('foo(')
    Traceback (most recent call last):
    ...
    ParseError: ('not a prefix: end', 4)
    >>> _parseexpr('"foo" "bar"')
    Traceback (most recent call last):
    ...
    ParseError: ('invalid token', 7)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(expr, 0, len(expr)))
    # the whole input must be a single expression
    if pos != len(expr):
        raise error.ParseError(_('invalid token'), pos)
    return _unnesttemplatelist(tree)
251 251
def prettyformat(tree):
    """Pretty-print a parsed tree for debugging (leaf node types:
    integer/string/symbol)"""
    return parser.prettyformat(tree, ('integer', 'string', 'symbol'))
254 254
def compileexp(exp, context, curmethods):
    """Compile parsed template tree to (func, data) pair"""
    op = exp[0]
    if op not in curmethods:
        raise error.ParseError(_("unknown method '%s'") % op)
    return curmethods[op](exp, context)
261 261
262 262 # template evaluation
263 263
def getsymbol(exp):
    """Extract the name of a 'symbol' node, or raise ParseError"""
    if exp[0] != 'symbol':
        raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
    return exp[1]
268 268
def getlist(x):
    """Flatten a left-leaning 'list' node into a Python list of nodes"""
    if not x:
        return []
    # walk down the left spine iteratively instead of recursing
    items = []
    while x[0] == 'list':
        items.append(x[2])
        x = x[1]
    items.append(x)
    items.reverse()
    return items
275 275
def gettemplate(exp, context):
    """Compile given template tree or load named template from map file;
    returns (func, data) pair"""
    op = exp[0]
    if op in ('template', 'string'):
        return compileexp(exp, context, methods)
    if op == 'symbol':
        # unlike runsymbol(), here 'symbol' is always taken as template name
        # even if it exists in mapping. this allows us to override mapping
        # by web templates, e.g. 'changelogtag' is redefined in map file.
        return context._load(exp[1])
    raise error.ParseError(_("expected template specifier"))
287 287
def findsymbolicname(arg):
    """Find symbolic name for the given compiled expression; returns None
    if nothing found reliably"""
    func, data = arg
    while True:
        if func is runsymbol:
            return data
        if func is not runfilter:
            return None
        # peel one filter application and inspect its input expression
        func, data = data[0]
299 299
def evalfuncarg(context, mapping, arg):
    """Evaluate a compiled (func, data) pair to a concrete value"""
    func, data = arg
    # func() may return string, generator of strings or arbitrary object such
    # as date tuple, but filter does not want generator.
    thing = func(context, mapping, data)
    if isinstance(thing, types.GeneratorType):
        return stringify(thing)
    return thing
308 308
def evalboolean(context, mapping, arg):
    """Evaluate given argument as boolean, but also takes boolean literals"""
    func, data = arg
    if func is not runsymbol:
        thing = func(context, mapping, data)
    else:
        thing = func(context, mapping, data, default=None)
        if thing is None:
            # not a template keyword, takes as a boolean literal
            thing = util.parsebool(data)
    if isinstance(thing, bool):
        return thing
    # other objects are evaluated as strings, which means 0 is True, but
    # empty dict/list should be False as they are expected to be ''
    return bool(stringify(thing))
324 324
def evalinteger(context, mapping, arg, err):
    """Evaluate arg as an integer; raise ParseError(err) if not coercible"""
    # evaluate outside the try block so evaluation errors propagate as-is
    v = evalfuncarg(context, mapping, arg)
    try:
        return int(v)
    except (TypeError, ValueError):
        raise error.ParseError(err)
331 331
def evalstring(context, mapping, arg):
    """Evaluate arg and coerce the result to a string"""
    func, data = arg
    return stringify(func(context, mapping, data))
335 335
def evalstringliteral(context, mapping, arg):
    """Evaluate given argument as string template, but returns symbol name
    if it is unknown"""
    func, data = arg
    if func is runsymbol:
        # fall back to the symbol's own name when it isn't a known keyword
        return stringify(func(context, mapping, data, default=data))
    return stringify(func(context, mapping, data))
345 345
def runinteger(context, mapping, data):
    """Evaluate a parsed integer literal"""
    return int(data)
348 348
def runstring(context, mapping, data):
    """Evaluate a parsed string literal (already unescaped)"""
    return data
351 351
def _recursivesymbolblocker(key):
    # returns a callable placed into the mapping in place of 'key' so that
    # re-expanding the same symbol aborts instead of recursing forever
    def showrecursion(**args):
        raise error.Abort(_("recursive reference '%s' in template") % key)
    return showrecursion
356 356
def _runrecursivesymbol(context, mapping, key):
    # abort unconditionally: expanding 'key' would recurse into itself
    raise error.Abort(_("recursive reference '%s' in template") % key)
359 359
def runsymbol(context, mapping, key, default=''):
    """Resolve a symbol: mapping value, context default, or named template;
    callables are invoked with the mapping as keyword arguments"""
    v = mapping.get(key)
    if v is None:
        v = context._defaults.get(key)
    if v is None:
        # put poison to cut recursion. we can't move this to parsing phase
        # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
        safemapping = mapping.copy()
        safemapping[key] = _recursivesymbolblocker(key)
        try:
            v = context.process(key, safemapping)
        except TemplateNotFound:
            v = default
    if callable(v):
        return v(**mapping)
    return v
376 376
def buildtemplate(exp, context):
    # compile each fragment of a 'template' node; evaluated by runtemplate()
    ctmpl = [compileexp(e, context, methods) for e in exp[1:]]
    return (runtemplate, ctmpl)
380 380
def runtemplate(context, mapping, template):
    """Lazily evaluate each compiled (func, data) fragment in order"""
    return (func(context, mapping, data) for func, data in template)
384 384
def buildfilter(exp, context):
    """Compile a '|' node as either a filter application or a unary
    function call"""
    n = getsymbol(exp[2])
    if n in context._filters:
        arg = compileexp(exp[1], context, methods)
        return (runfilter, (arg, context._filters[n]))
    if n in funcs:
        f = funcs[n]
        return (f, _buildfuncargs(exp[1], context, methods, n, f._argspec))
    raise error.ParseError(_("unknown function '%s'") % n)
396 396
def runfilter(context, mapping, data):
    """Apply a filter function to an evaluated argument; converts common
    type errors into a user-facing abort message"""
    arg, filt = data
    thing = evalfuncarg(context, mapping, arg)
    try:
        return filt(thing)
    except (ValueError, AttributeError, TypeError):
        # name the offending keyword when it can be determined
        # (func_name: this codebase targets Python 2)
        sym = findsymbolicname(arg)
        if sym:
            msg = (_("template filter '%s' is not compatible with keyword '%s'")
                   % (filt.func_name, sym))
        else:
            msg = _("incompatible use of template filter '%s'") % filt.func_name
        raise error.Abort(msg)
410 410
def buildmap(exp, context):
    # compile a '%' node: left side yields items, right side is the
    # template applied to each item by runmap()
    func, data = compileexp(exp[1], context, methods)
    tfunc, tdata = gettemplate(exp[2], context)
    return (runmap, (func, data, tfunc, tdata))
415 415
def runmap(context, mapping, data):
    """Apply a template to each item produced by the mapped expression"""
    func, data, tfunc, tdata = data
    d = func(context, mapping, data)
    # hybrid objects expose itermaps() to yield per-item dicts
    if util.safehasattr(d, 'itermaps'):
        diter = d.itermaps()
    else:
        try:
            diter = iter(d)
        except TypeError:
            if func is runsymbol:
                raise error.ParseError(_("keyword '%s' is not iterable") % data)
            else:
                raise error.ParseError(_("%r is not iterable") % d)

    for i, v in enumerate(diter):
        # each item gets a copy of the mapping plus its own fields
        lm = mapping.copy()
        lm['index'] = i
        if isinstance(v, dict):
            lm.update(v)
            lm['originalnode'] = mapping.get('node')
            yield tfunc(context, lm, tdata)
        else:
            # v is not an iterable of dicts, this happens when 'key'
            # has been fully expanded already and format is useless.
            # If so, return the expanded value.
            yield v
442 442
def buildnegate(exp, context):
    # compile a unary '-' node; evaluated by runnegate()
    arg = compileexp(exp[1], context, exprmethods)
    return (runnegate, arg)
446 446
def runnegate(context, mapping, data):
    """Evaluate the operand as an integer and return its negation"""
    data = evalinteger(context, mapping, data,
                       _('negation needs an integer argument'))
    return -data
451 451
def buildarithmetic(exp, context, func):
    # compile a binary arithmetic node; 'func' is the integer operation
    # applied by runarithmetic()
    left = compileexp(exp[1], context, exprmethods)
    right = compileexp(exp[2], context, exprmethods)
    return (runarithmetic, (func, left, right))
456 456
def runarithmetic(context, mapping, data):
    """Evaluate both operands as integers and apply the operation"""
    func, left, right = data
    err = _('arithmetic only defined on integers')
    lhs = evalinteger(context, mapping, left, err)
    rhs = evalinteger(context, mapping, right, err)
    try:
        return func(lhs, rhs)
    except ZeroDivisionError:
        raise error.Abort(_('division by zero is not defined'))
467 467
def buildfunc(exp, context):
    """Compile a 'func' node: a registered template function, or a filter
    invoked in function form with exactly one argument"""
    n = getsymbol(exp[1])
    if n in funcs:
        f = funcs[n]
        args = _buildfuncargs(exp[2], context, exprmethods, n, f._argspec)
        return (f, args)
    if n in context._filters:
        args = _buildfuncargs(exp[2], context, exprmethods, n, argspec=None)
        if len(args) != 1:
            raise error.ParseError(_("filter %s expects one argument") % n)
        return (runfilter, (args[0], context._filters[n]))
    raise error.ParseError(_("unknown function '%s'") % n)
481 481
def _buildfuncargs(exp, context, curmethods, funcname, argspec):
    """Compile parsed tree of function arguments into list or dict of
    (func, data) pairs

    >>> context = engine(lambda t: (runsymbol, t))
    >>> def fargs(expr, argspec):
    ...     x = _parseexpr(expr)
    ...     n = getsymbol(x[1])
    ...     return _buildfuncargs(x[2], context, exprmethods, n, argspec)
    >>> fargs('a(l=1, k=2)', 'k l m').keys()
    ['l', 'k']
    >>> args = fargs('a(opts=1, k=2)', '**opts')
    >>> args.keys(), args['opts'].keys()
    (['opts'], ['opts', 'k'])
    """
    def compiledict(xs):
        return util.sortdict((k, compileexp(x, context, curmethods))
                             for k, x in xs.iteritems())
    def compilelist(xs):
        return [compileexp(x, context, curmethods) for x in xs]

    if not argspec:
        # filter or function with no argspec: return list of positional args
        return compilelist(getlist(exp))

    # function with argspec: return dict of named args
    _poskeys, varkey, _keys, optkey = argspec = parser.splitargspec(argspec)
    treeargs = parser.buildargsdict(getlist(exp), funcname, argspec,
                                    keyvaluenode='keyvalue', keynode='symbol')
    compargs = util.sortdict()
    if varkey:
        # variadic positional arguments (e.g. '*args')
        compargs[varkey] = compilelist(treeargs.pop(varkey))
    if optkey:
        # catch-all keyword arguments (e.g. '**kwargs')
        compargs[optkey] = compiledict(treeargs.pop(optkey))
    compargs.update(compiledict(treeargs))
    return compargs
518 518
def buildkeyvaluepair(exp, content):
    # 'key=value' is only valid inside function argument lists, which are
    # handled by _buildfuncargs(); anywhere else it is a syntax error
    raise error.ParseError(_("can't use a key-value pair in this context"))
521 521
# dict of template built-in functions
funcs = {}

# decorator that registers a function into 'funcs' under its declared
# signature string
templatefunc = registrar.templatefunc(funcs)
526 526
@templatefunc('date(date[, fmt])')
def date(context, mapping, args):
    """Format a date. See :hg:`help dates` for formatting
    strings. The default is a Unix date format, including the timezone:
    "Mon Sep 04 15:13:13 2006 0700"."""
    if not (1 <= len(args) <= 2):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    fmt = evalstring(context, mapping, args[1]) if len(args) == 2 else None
    try:
        if fmt is None:
            return util.datestr(date)
        return util.datestr(date, fmt)
    except (TypeError, ValueError):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects a date information"))
548 548
@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
def dict_(context, mapping, args):
    """Construct a dict from key-value pairs. A key may be omitted if
    a value expression can provide an unambiguous name."""
    data = util.sortdict()

    # positional args: infer the key from the expression's symbolic name
    for v in args['args']:
        k = findsymbolicname(v)
        if not k:
            raise error.ParseError(_('dict key cannot be inferred'))
        if k in data or k in args['kwargs']:
            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
        data[k] = evalfuncarg(context, mapping, v)

    data.update((k, evalfuncarg(context, mapping, v))
                for k, v in args['kwargs'].iteritems())
    return templatekw.hybriddict(data)
566 566
@templatefunc('diff([includepattern [, excludepattern]])')
def diff(context, mapping, args):
    """Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects zero, one, or two arguments"))

    def getpatterns(i):
        # absent or empty argument means no pattern at this position
        if i >= len(args):
            return []
        s = evalstring(context, mapping, args[i]).strip()
        return [s] if s else []

    ctx = mapping['ctx']
    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
    return ''.join(chunks)
586 586
@templatefunc('files(pattern)')
def files(context, mapping, args):
    """All files of the current changeset matching the pattern. See
    :hg:`help patterns`."""
    if len(args) != 1:
        # i18n: "files" is a keyword
        raise error.ParseError(_("files expects one argument"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    matched = list(ctx.matches(ctx.match([raw])))
    return templatekw.showlist("file", matched, mapping)
600 600
@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
def fill(context, mapping, args):
    """Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = evalstring(context, mapping, args[0])
    # optional arguments default to a 76-column width and no indentation
    width = 76
    initindent = ''
    hangindent = ''
    if len(args) >= 2:
        width = evalinteger(context, mapping, args[1],
                            # i18n: "fill" is a keyword
                            _("fill expects an integer width"))
    if len(args) >= 3:
        initindent = evalstring(context, mapping, args[2])
    if len(args) >= 4:
        hangindent = evalstring(context, mapping, args[3])

    return templatefilters.fill(text, width, initindent, hangindent)
624 624
@templatefunc('formatnode(node)')
def formatnode(context, mapping, args):
    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
    if len(args) != 1:
        # i18n: "formatnode" is a keyword
        raise error.ParseError(_("formatnode expects one argument"))

    ui = mapping['ui']
    node = evalstring(context, mapping, args[0])
    # full hash with --debug, short form otherwise
    return node if ui.debugflag else templatefilters.short(node)
637 637
@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
              argspec='text width fillchar left')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if 'text' not in args or 'width' not in args:
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args['width'],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))

    text = evalstring(context, mapping, args['text'])

    fillchar = ' '
    if 'fillchar' in args:
        fillchar = evalstring(context, mapping, args['fillchar'])
        if len(color.stripeffects(fillchar)) != 1:
            # i18n: "pad" is a keyword
            raise error.ParseError(_("pad() expects a single fill character"))
    left = 'left' in args and evalboolean(context, mapping, args['left'])

    # pad based on displayed width, ignoring color effect sequences
    fillwidth = width - encoding.colwidth(color.stripeffects(text))
    if fillwidth <= 0:
        return text
    padding = fillchar * fillwidth
    return padding + text if left else text + padding
670 670
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indent = evalstring(context, mapping, args[1])
    if len(args) == 3:
        firstline = evalstring(context, mapping, args[2])
    else:
        firstline = indent

    # the indent filter doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)
691 691
@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = evalfuncarg(context, mapping, args[0])
    # duck-type check: anything with a .get() method is acceptable
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    return dictarg.get(evalfuncarg(context, mapping, args[1]))
708 708
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    if evalboolean(context, mapping, args[0]):
        # take the "then" branch
        func, data = args[1]
        yield func(context, mapping, data)
    elif len(args) == 3:
        # take the optional "else" branch
        func, data = args[2]
        yield func(context, mapping, data)
722 722
@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    needle = evalstring(context, mapping, args[0])
    haystack = evalfuncarg(context, mapping, args[1])

    # a type mismatch between needle and haystack (e.g. a string tested
    # against a list of integers) is not a template syntax error; treat
    # it as "not found" instead of letting TypeError escape
    try:
        found = needle in haystack
    except TypeError:
        found = False

    if found:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])
738 738
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    # both operands are compared as strings
    lhs = evalstring(context, mapping, args[0])
    rhs = evalstring(context, mapping, args[1])
    if lhs == rhs:
        branch = args[2]
    elif len(args) == 4:
        branch = args[3]
    else:
        branch = None
    if branch is not None:
        yield branch[0](context, mapping, branch[1])
753 753
@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    func, data = args[0]
    joinset = func(context, mapping, data)
    if util.safehasattr(joinset, 'itermaps'):
        # hybrid list/dict object: render each member through its own
        # join format before joining
        jf = joinset.joinfmt
        joinset = [jf(x) for x in joinset.itermaps()]

    joiner = evalstring(context, mapping, args[1]) if len(args) > 1 else " "

    # emit the separator between items, but not before the first one
    needsep = False
    for item in joinset:
        if needsep:
            yield joiner
        needsep = True
        yield item
777 777
@templatefunc('label(label, expr)')
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = mapping['ui']
    content = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    effects = evalstringliteral(context, mapping, args[0])

    return ui.label(content, effects)
794 794
@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset.
    If no such tags exist, the "{tag}" template resolves to
    the string "null"."""
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    # with no argument, all tags are considered (pattern=None)
    pattern = evalstring(context, mapping, args[0]) if args else None
    return templatekw.showlatesttags(pattern, **mapping)
810 810
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    try:
        date = util.parsedate(date)
    except AttributeError: # not str nor date tuple
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects a date information"))
    if len(args) >= 2:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        if isinstance(tz, str):
            # try parsing a formatted timezone string first; any trailing
            # unparsed text invalidates the result
            tzoffset, remainder = util.parsetimezone(tz)
            if remainder:
                tzoffset = None
        if tzoffset is None:
            # fall back to interpreting tz as an integer offset in seconds
            try:
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    else:
        # no tz argument: use the local timezone offset
        tzoffset = util.makedate()[1]
    # keep the timestamp, replace only the offset component
    return (date[0], tzoffset)
841 841
@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that (a / b) * b + (a mod b) == a
    (the remainder matching floor division)"""
    if not len(args) == 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    # operands are template expressions; runarithmetic evaluates them
    # as integers before applying the operator
    func = lambda a, b: a % b
    return runarithmetic(context, mapping, (func, args[0], args[1]))
851 851
@templatefunc('relpath(path)')
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    repo = mapping['ctx'].repo()
    # repo.pathto performs the cwd-relative conversion
    relative = evalstring(context, mapping, args[0])
    return repo.pathto(relative)
863 863
@templatefunc('revset(query[, formatargs...])')
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    if not len(args) > 0:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    repo = ctx.repo()

    def query(expr):
        # evaluate a revset expression against this repository
        m = revsetmod.match(repo.ui, expr)
        return m(repo)

    if len(args) > 1:
        # extra arguments are spliced into the query via formatspec, so
        # the result depends on them and is not cached
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = query(revsetlang.formatspec(raw, *formatargs))
        revs = list(revs)
    else:
        # plain queries are memoized per template run in the shared cache,
        # keyed by the raw query string
        revsetcache = mapping['cache'].setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list(revs)
            revsetcache[raw] = revs

    return templatekw.showrevslist("revision", revs, **mapping)
894 894
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
    """Format reStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    source = evalstring(context, mapping, args[0])
    style = evalstring(context, mapping, args[1])

    # keep=['verbose'] passes verbose-marked sections through to minirst
    return minirst.format(source, style=style, keep=['verbose'])
906 906
@templatefunc('separate(sep, args)', argspec='sep *args')
def separate(context, mapping, args):
    """Add a separator between non-empty arguments."""
    if 'sep' not in args:
        # i18n: "separate" is a keyword
        raise error.ParseError(_("separate expects at least one argument"))

    sep = evalstring(context, mapping, args['sep'])
    needsep = False
    for arg in args['args']:
        argstr = evalstring(context, mapping, arg)
        if not argstr:
            # empty arguments contribute nothing, not even a separator
            continue
        if needsep:
            yield sep
        needsep = True
        yield argstr
925 925
@templatefunc('shortest(node, minlength=4)')
def shortest(context, mapping, args):
    """Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = evalstring(context, mapping, args[0])

    minlength = 4
    if len(args) > 1:
        minlength = evalinteger(context, mapping, args[1],
                                # i18n: "shortest" is a keyword
                                _("shortest() expects an integer minlength"))

    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = mapping['ctx']._repo.unfiltered().changelog
    def isvalid(test):
        # a prefix is valid if it resolves to exactly one node and cannot
        # be confused with a plain revision number
        try:
            if cl._partialmatch(test) is None:
                return False

            try:
                i = int(test)
                # if we are a pure int, then starting with zero will not be
                # confused as a rev; or, obviously, if the int is larger than
                # the value of the tip rev
                if test[0] == '0' or i > len(cl):
                    return True
                return False
            except ValueError:
                return True
        except error.RevlogError:
            return False
        except error.WdirUnsupported:
            # single 'ff...' match
            return True

    shortest = node
    startlength = max(6, minlength)
    length = startlength
    # shrink from startlength while prefixes stay valid (down to minlength);
    # if startlength itself was invalid, grow instead until valid
    while True:
        test = node[:length]
        if isvalid(test):
            shortest = test
            if length == minlength or length > startlength:
                return shortest
            length -= 1
        else:
            length += 1
            if len(shortest) <= length:
                return shortest
981 981
@templatefunc('strip(text[, chars])')
def strip(context, mapping, args):
    """Strip characters from a string. By default,
    strips all leading and trailing whitespace."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = evalstring(context, mapping, args[0])
    if len(args) < 2:
        return text.strip()
    # an explicit character set replaces the default whitespace stripping
    return text.strip(evalstring(context, mapping, args[1]))
995 995
@templatefunc('sub(pattern, replacement, expression)')
def sub(context, mapping, args):
    """Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat = evalstring(context, mapping, args[0])
    rpl = evalstring(context, mapping, args[1])
    src = evalstring(context, mapping, args[2])
    # compile and substitute in separate try blocks so the error message
    # can name whichever of pattern or replacement was invalid
    try:
        patre = re.compile(pat)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
    try:
        yield patre.sub(rpl, src)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
1017 1017
@templatefunc('startswith(pattern, text)')
def startswith(context, mapping, args):
    """Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    prefix = evalstring(context, mapping, args[0])
    text = evalstring(context, mapping, args[1])
    # non-matching text collapses to the empty string
    return text if text.startswith(prefix) else ''
1031 1031
@templatefunc('word(number, text[, separator])')
def word(context, mapping, args):
    """Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    num = evalinteger(context, mapping, args[0],
                      # i18n: "word" is a keyword
                      _("word expects an integer index"))
    text = evalstring(context, mapping, args[1])
    # a None separator makes str.split() split on runs of whitespace
    splitter = evalstring(context, mapping, args[2]) if len(args) == 3 else None

    tokens = text.split(splitter)
    # out-of-range indices (in either direction) yield an empty string
    # rather than raising
    if -len(tokens) <= num < len(tokens):
        return tokens[num]
    return ''
1054 1054
1055 1055 # methods to interpret function arguments or inner expressions (e.g. {_(x)})
1056 1056 exprmethods = {
1057 1057 "integer": lambda e, c: (runinteger, e[1]),
1058 1058 "string": lambda e, c: (runstring, e[1]),
1059 1059 "symbol": lambda e, c: (runsymbol, e[1]),
1060 1060 "template": buildtemplate,
1061 1061 "group": lambda e, c: compileexp(e[1], c, exprmethods),
1062 1062 # ".": buildmember,
1063 1063 "|": buildfilter,
1064 1064 "%": buildmap,
1065 1065 "func": buildfunc,
1066 1066 "keyvalue": buildkeyvaluepair,
1067 1067 "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
1068 1068 "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
1069 1069 "negate": buildnegate,
1070 1070 "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
1071 1071 "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
1072 1072 }
1073 1073
1074 1074 # methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
1075 1075 methods = exprmethods.copy()
1076 1076 methods["integer"] = exprmethods["symbol"] # '{1}' as variable
1077 1077
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of template aliases"""
    _section = _('template alias')
    _parse = staticmethod(_parseexpr)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if tree is func(...) or ...|filter; otherwise
        None"""
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            # func(...) form: name is the symbol, args the argument list
            return tree[1][1], getlist(tree[2])
        if tree[0] == '|' and tree[2][0] == 'symbol':
            # expr|filter form: treat the filter as a one-argument function
            return tree[2][1], [tree[1]]
1091 1091
def expandaliases(tree, aliases):
    """Return a new tree with the given (name, replacement) aliases expanded"""
    aliasmap = _aliasrules.buildmap(aliases)
    return _aliasrules.expand(aliasmap, tree)
1096 1096
# template engine

# re-exported from templatefilters for use by this module's callers
stringify = templatefilters.stringify
1100 1100
def _flatten(thing):
    '''yield a single stream from a possibly nested set of iterators'''
    thing = templatekw.unwraphybrid(thing)
    if isinstance(thing, str):
        yield thing
    elif thing is None:
        # None values disappear from the output stream
        pass
    elif not util.safehasattr(thing, '__iter__'):
        # scalar (e.g. int): stringify it
        yield str(thing)
    else:
        # nested iterable: recurse into each member; recursion performs
        # the same unwrap/str/None/scalar handling at every level
        for member in thing:
            for piece in _flatten(member):
                yield piece
1122 1122
def unquotestring(s):
    '''unwrap quotes if any; otherwise returns unmodified string'''
    # only strip when the string is long enough to carry a matched pair of
    # identical quote characters at both ends
    if len(s) >= 2 and s[0] in "'\"" and s[0] == s[-1]:
        return s[1:-1]
    return s
1128 1128
class engine(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    def __init__(self, loader, filters=None, defaults=None, aliases=()):
        # loader: callable mapping a template name to its unparsed text
        self._loader = loader
        if filters is None:
            filters = {}
        self._filters = filters
        if defaults is None:
            defaults = {}
        self._defaults = defaults
        self._aliasmap = _aliasrules.buildmap(aliases)
        self._cache = {} # key: (func, data)

    def _load(self, t):
        '''load, parse, and cache a template'''
        if t not in self._cache:
            # put poison to cut recursion while compiling 't'
            self._cache[t] = (_runrecursivesymbol, t)
            try:
                x = parse(self._loader(t))
                if self._aliasmap:
                    # apply alias expansion before compiling
                    x = _aliasrules.expand(self._aliasmap, x)
                self._cache[t] = compileexp(x, self, methods)
            except: # re-raises
                # don't leave the poison entry behind on failure
                del self._cache[t]
                raise
        return self._cache[t]

    def process(self, t, mapping):
        '''Perform expansion. t is name of map element to expand.
        mapping contains added elements for use during expansion. Is a
        generator.'''
        func, data = self._load(t)
        return _flatten(func(self, mapping, data))
1181 1181
1182 1182 engines = {'default': engine}
1183 1183
def stylelist():
    """Return a comma-separated, sorted list of available style names."""
    paths = templatepaths()
    if not paths:
        return _('no templates found, try `hg debuginstall` for more info')
    styles = []
    for filename in os.listdir(paths[0]):
        parts = filename.split(".")
        if parts[-1] in ('orig', 'rej'):
            # skip patch/merge leftovers
            continue
        if parts[0] == "map-cmdline":
            styles.append(parts[1])
    return ", ".join(sorted(styles))
1197 1197
def _readmapfile(mapfile):
    """Load template elements from the given map file

    Returns a (cache, tmap) pair: cache maps keys to inline template
    strings, tmap maps keys to (engine type, template file path) tuples.
    """
    if not os.path.exists(mapfile):
        raise error.Abort(_("style '%s' not found") % mapfile,
                          hint=_("available styles: %s") % stylelist())

    base = os.path.dirname(mapfile)
    conf = config.config(includepaths=templatepaths())
    conf.read(mapfile)

    cache = {}
    tmap = {}
    for key, val in conf[''].items():
        if not val:
            raise error.ParseError(_('missing value'), conf.source('', key))
        if val[0] in "'\"":
            # quoted value: an inline template string
            if val[0] != val[-1]:
                raise error.ParseError(_('unmatched quotes'),
                                       conf.source('', key))
            cache[key] = unquotestring(val)
        elif key == "__base__":
            # treat as a pointer to a base class for this style
            path = util.normpath(os.path.join(base, val))

            # fallback check in template paths
            if not os.path.exists(path):
                for p in templatepaths():
                    p2 = util.normpath(os.path.join(p, val))
                    if os.path.isfile(p2):
                        path = p2
                        break
                    p3 = util.normpath(os.path.join(p2, "map"))
                    if os.path.isfile(p3):
                        path = p3
                        break

            # recursively read the base style; entries defined locally
            # take precedence over inherited ones
            bcache, btmap = _readmapfile(path)
            for k in bcache:
                if k not in cache:
                    cache[k] = bcache[k]
            for k in btmap:
                if k not in tmap:
                    tmap[k] = btmap[k]
        else:
            # unquoted value: "[engine:]filename" of a template file,
            # resolved relative to the map file's directory
            val = 'default', val
            if ':' in val[1]:
                val = val[1].split(':', 1)
            tmap[key] = val[0], os.path.join(base, val[1])
    return cache, tmap
1247 1247
class TemplateNotFound(error.Abort):
    # raised when a requested template name exists in neither the cache
    # nor the map file (see templater.load)
    pass
1250 1250
class templater(object):
    """Named-template collection: loads template text (inline or from map
    files) and renders it through a per-engine-type ``engine`` instance."""

    def __init__(self, filters=None, defaults=None, cache=None, aliases=(),
                 minchunk=1024, maxchunk=65536):
        '''set up template engine.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.
        aliases is list of alias (name, replacement) pairs.
        '''
        if filters is None:
            filters = {}
        if defaults is None:
            defaults = {}
        if cache is None:
            cache = {}
        # copy so the caller's dict is not mutated as templates are loaded
        self.cache = cache.copy()
        self.map = {}
        self.filters = templatefilters.filters.copy()
        self.filters.update(filters)
        self.defaults = defaults
        self._aliases = aliases
        # rendered output is re-chunked into [minchunk, maxchunk] pieces
        self.minchunk, self.maxchunk = minchunk, maxchunk
        self.ecache = {} # engine type name -> engine instance

    @classmethod
    def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None,
                    minchunk=1024, maxchunk=65536):
        """Create templater from the specified map file"""
        t = cls(filters, defaults, cache, [], minchunk, maxchunk)
        cache, tmap = _readmapfile(mapfile)
        t.cache.update(cache)
        t.map = tmap
        return t

    def __contains__(self, key):
        # a template is known if already cached or named in the map
        return key in self.cache or key in self.map

    def load(self, t):
        '''Get the template for the given template name. Use a local cache.'''
        if t not in self.cache:
            try:
                self.cache[t] = util.readfile(self.map[t][1])
            except KeyError as inst:
                raise TemplateNotFound(_('"%s" not in template map') %
                                       inst.args[0])
            except IOError as inst:
                raise IOError(inst.args[0], _('template file %s: %s') %
                              (self.map[t][1], inst.args[1]))
        return self.cache[t]

    def render(self, mapping):
        """Render the default unnamed template and return result as string"""
        return stringify(self('', **mapping))

    def __call__(self, t, **mapping):
        """Expand template t with mapping; returns an iterable of output
        chunks."""
        # use the engine declared for this template in the map file,
        # falling back to 'default'
        ttype = t in self.map and self.map[t][0] or 'default'
        if ttype not in self.ecache:
            try:
                ecls = engines[ttype]
            except KeyError:
                raise error.Abort(_('invalid template engine: %s') % ttype)
            self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
                                      self._aliases)
        proc = self.ecache[ttype]

        stream = proc.process(t, mapping)
        if self.minchunk:
            # merge tiny yields into larger chunks for efficient writing
            stream = util.increasingchunks(stream, min=self.minchunk,
                                           max=self.maxchunk)
        return stream
1317 1321
def templatepaths():
    '''return locations used for template files.'''
    subdirs = ['templates']
    candidates = [os.path.normpath(os.path.join(util.datapath, subdir))
                  for subdir in subdirs]
    # only report directories that actually exist
    return [path for path in candidates if os.path.isdir(path)]
1324 1328
def templatepath(name):
    '''return location of template file. returns None if not found.'''
    for base in templatepaths():
        candidate = os.path.join(base, name)
        if os.path.exists(candidate):
            return candidate
    return None
1332 1336
def stylemap(styles, paths=None):
    """Return path to mapfile for a given style.

    Searches mapfile in the following locations:
    1. templatepath/style/map
    2. templatepath/map-style
    3. templatepath/map
    """

    if paths is None:
        paths = templatepaths()
    elif isinstance(paths, str):
        paths = [paths]

    if isinstance(styles, str):
        styles = [styles]

    for style in styles:
        # only plain name is allowed to honor template paths
        if (not style
            or style in (os.curdir, os.pardir)
            or pycompat.ossep in style
            or pycompat.osaltsep and pycompat.osaltsep in style):
            continue
        candidates = [os.path.join(style, 'map'), 'map-' + style, 'map']

        for base in paths:
            for candidate in candidates:
                mapfile = os.path.join(base, candidate)
                if os.path.isfile(mapfile):
                    return style, mapfile

    raise RuntimeError("No hgweb templates found in %r" % paths)
1367 1371
def loadfunction(ui, extname, registrarobj):
    """Load template function from specified registrarobj
    """
    # merge extension-provided functions into the module-level table
    for name, func in registrarobj._table.iteritems():
        funcs[name] = func

# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()
General Comments 0
You need to be logged in to leave comments. Login now