##// END OF EJS Templates
templater: keep default resources per template engine (API)...
Yuya Nishihara -
r35484:32c278eb default
parent child Browse files
Show More
@@ -1,3973 +1,3971 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import itertools
12 12 import os
13 13 import re
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 dagop,
30 30 dirstateguard,
31 31 encoding,
32 32 error,
33 33 formatter,
34 34 graphmod,
35 35 match as matchmod,
36 36 mdiff,
37 37 obsolete,
38 38 patch,
39 39 pathutil,
40 40 pycompat,
41 41 registrar,
42 42 revlog,
43 43 revset,
44 44 scmutil,
45 45 smartset,
46 46 templatekw,
47 47 templater,
48 48 util,
49 49 vfs as vfsmod,
50 50 )
stringio = util.stringio

# templates of common command options
# (shared flag definitions that individual commands splice into their own
# option tables)

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]
161 161
# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
165 165
def ishunk(x):
    """Return True if x is a record/crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
169 169
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose selected hunks create a new file
    and were edited (i.e. the hunk is not present in originalchunks)."""
    return set(chunk.header.filename()
               for chunk in chunks
               if ishunk(chunk) and chunk.header.isnewfile()
               and chunk not in originalchunks)
177 177
def parsealiases(cmd):
    """Split a command-table key like "^log|history" into its alias list.

    The leading '^' markers (used to flag commonly-used commands) are
    discarded before splitting on '|'.
    """
    stripped = cmd.lstrip("^")
    return stripped.split("|")
180 180
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original write method so the caller can restore it later.
    """
    def labeledwrite(orig, *args, **kw):
        label = kw.pop(r'label', '')
        # patch.difflabel yields (chunk, label-suffix) pairs for the text
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrapper(*args, **kwargs):
        return labeledwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrapper)
    return oldwrite
193 193
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Dispatch hunk selection to the curses or the plain-text recorder."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # test mode: drive the curses chunk selector from a script file
        chooser = crecordmod.testdecorator(testfile,
                                           crecordmod.testchunkselector)
    else:
        chooser = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, chooser, operation)
206 206
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks; return (chunks, opts).

    *operation* is used to build ui messages indicating what kind of
    filtering is being done: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    wantcurses = crecordmod.checkcurses(ui)
    scriptfile = ui.config('experimental', 'crecordtest')
    # colorize diff output while the selector runs, restore afterwards
    origwrite = setupwrapcolorwrite(ui)
    try:
        chunks, chosenopts = filterchunks(ui, originalhunks, wantcurses,
                                          scriptfile, operation)
    finally:
        ui.write = origwrite
    return chunks, chosenopts
223 223
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via *commitfunc*.

    *filterfn* is called with (ui, parsed hunks) and returns the subset
    to apply plus extra options (e.g. recordfilter).  *cmdsuggest* is the
    command name suggested in the abort message when the ui is not
    interactive.  When *backupall* is true, every changed file is backed
    up, not only those touched by the selected hunks.
    """
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # diff in git mode with function context, no dates: the format the
        # record/crecord hunk parser expects
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                # headers expose files(); plain hunks do not, hence the guard
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # take the working-copy lock around the whole record operation
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
406 406
class dirnode(object):
    """
    A directory of the user's working copy, carrying the information
    needed to decide whether its status output can be tersed.

    path: path of this directory relative to the repository root

    statuses: set of status characters seen for any file at or below
    this directory (including all subdirectories)

    files: (name, status) pairs for files that are direct children

    subdirs: maps a child directory name to its own dirnode object
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record a file living directly in this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file somewhere under this directory.

        When filename contains a path separator it is routed to the
        dirnode of the sub-directory it belongs to (created on demand);
        otherwise it is stored as a direct child.
        """
        if '/' in filename:
            dirname, remainder = filename.split('/', 1)

            # create the child dirnode lazily
            child = self.subdirs.get(dirname)
            if child is None:
                child = dirnode(os.path.join(self.path, dirname))
                self.subdirs[dirname] = child

            child.addfile(remainder, status)
        else:
            self._addfileindir(filename, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, os.path.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) entries for this subtree.

        terseargs is the string of status letters passed with `--terse`.

        If every file below this directory shares one status and that
        status letter is in terseargs, a single entry for the whole
        directory is produced.  Otherwise the direct files are yielded
        and the walk recurses into each sub-directory.
        """

        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # terse only when the status abbreviation was actually
            # requested by the user
            if onlyst in terseargs:
                yield onlyst, self.path + pycompat.ossep
                return

        # files directly in this directory
        for entry in self.iterfilepaths():
            yield entry

        # recurse into each sub-directory
        for child in self.subdirs.values():
            for entry in child.tersewalk(terseargs):
                yield entry
506 506
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory share the same status.

    statuslist is a scmutil.status() object holding one file list per
    status.  terseargs is the string of status letters the user passed
    to the `--terse` flag.

    Builds a tree of dirnode objects recording, per directory, which
    statuses occur beneath it, then walks the tree collapsing whole
    directories where allowed.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # validate the requested status letters
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_("'%s' not recognized") % s)

    # dirnode representing the repository root
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # the root directory itself is never tersed, so take its files directly
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each sub-directory, collapsing where possible
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    # emit one sorted list per status, in the canonical order
    return [sorted(tersedict[st]) for st in allst]
555 555
556 556 def _commentlines(raw):
557 557 '''Surround lineswith a comment char and a new line'''
558 558 lines = raw.splitlines()
559 559 commentedlines = ['# %s' % line for line in lines]
560 560 return '\n'.join(commentedlines) + '\n'
561 561
def _conflictsmsg(repo):
    """Return a commented summary of unresolved merge conflicts.

    Returns None when no merge state is active.
    """
    # avoid merge cycle
    from . import merge as mergemod
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    # only report unresolved files visible from the current working dir
    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if unresolvedlist:
        mergeliststr = '\n'.join(
            [' %s' % util.pathto(repo.root, pycompat.getcwd(), path)
             for path in unresolvedlist])
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    else:
        msg = _('No unresolved merge conflicts.')

    return _commentlines(msg)
584 584
def _helpmessage(continuecmd, abortcmd):
    """Return a commented "To continue / To abort" hint block."""
    msg = _('To continue: %s\n'
            'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(msg)
589 589
def _rebasemsg():
    """Hint shown for an interrupted rebase."""
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')
592 592
def _histeditmsg():
    """Hint shown for an interrupted histedit."""
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')
595 595
def _unshelvemsg():
    """Hint shown for an interrupted unshelve."""
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
598 598
def _updatecleanmsg(dest=None):
    """Return the 'hg update --clean' suggestion with a data-loss warning."""
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (dest or '.', warning)
602 602
def _graftmsg():
    """Hint shown for an interrupted graft."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())
606 606
def _mergemsg():
    """Hint shown for an uncommitted merge."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())
610 610
def _bisectmsg():
    """Commented instructions for an in-progress bisect."""
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)
616 616
def fileexistspredicate(filename):
    """Return a predicate testing whether repo.vfs contains *filename*."""
    def predicate(repo):
        return repo.vfs.exists(filename)
    return predicate
619 619
def _mergepredicate(repo):
    """True when the working directory has two parents (merge pending)."""
    return len(repo[None].parents()) > 1
622 622
# Unfinished-operation states; _getrepostate iterates these in order and
# returns the first one whose predicate matches.
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)
636 636
def _getrepostate(repo):
    """Return the first matching (state, predicate, msgfn) from STATES.

    States listed in commands.status.skipstates are ignored.  Returns
    None when nothing matches.
    """
    # experimental config: commands.status.skipstates
    skipped = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, predicate, msgfn in STATES:
        if state in skipped or not predicate(repo):
            continue
        return (state, predicate, msgfn)
645 645
def morestatus(repo, fm):
    """Write a commented summary of any unfinished operation to formatter fm.

    Emits nothing when _getrepostate finds no unfinished state.
    """
    statetuple = _getrepostate(repo)
    label = 'status.morestatus'
    if statetuple:
        fm.startitem()
        state, statedetectionpredicate, helpfulmsg = statetuple
        statemsg = _('The repository is in an unfinished *%s* state.') % state
        fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
        # list unresolved conflicts, if a merge is active
        conmsg = _conflictsmsg(repo)
        if conmsg:
            fm.write('conflictsmsg', '%s\n', conmsg, label=label)
        if helpfulmsg:
            helpmsg = helpfulmsg()
            fm.write('helpmsg', '%s\n', helpmsg, label=label)
660 660
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    # an exact table key short-circuits everything ("log" beats "^log|history")
    keys = [cmd] if cmd in table else table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)

        matched = None
        if cmd in aliases:
            matched = cmd
        elif not strict:
            # prefix match against each alias, first hit wins
            for alias in aliases:
                if alias.startswith(cmd):
                    matched = alias
                    break
        if matched is None:
            continue

        isdebug = (aliases[0].startswith("debug")
                   or matched.startswith("debug"))
        target = debugchoice if isdebug else choice
        target[matched] = (aliases, table[entry])

    # surface debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
698 698
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact hit wins outright
    if cmd in choice:
        return choice[cmd]

    # several prefix matches: ambiguous
    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    # exactly one prefix match
    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
714 714
def findrepo(p):
    """Walk up from *p* looking for a directory containing ".hg".

    Returns the repository root, or None when the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point: we hit the filesystem root
            return None
        p = parent

    return p
722 722
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # first four status fields: modified, added, removed, deleted
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise error.Abort(_('uncommitted changes'), hint=hint)
    # subrepositories must be clean as well
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)
740 740
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    # -m and -l cannot be combined
    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if not message and logfile:
        try:
            if isstdiofilename(logfile):
                # '-' (or empty) means read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading from the file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))
    return message
759 759
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changeset with two parents is a merge
        ismerge = 1 < len(ctxorbool.parents())
    return baseformname + (".merge" if ismerge else ".normal")
776 776
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing but before
    the emptiness check; its return value is the text actually stored
    into history.  This allows the description to be changed before
    storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line ('HG: ' prefix and EOL
    are added automatically).

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever one of
    'finishdesc' or 'extramsg' is given, because they are specific for
    usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
807 807
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    raw = opts.get('limit')
    if not raw:
        # no (or empty) limit: unlimited
        return None
    try:
        limit = int(raw)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
821 821
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in the output filename pattern *pat*.

    Escapes are only registered when the corresponding argument was
    supplied (e.g. node-based ones require *node*); an escape with no
    registered expansion aborts with an 'invalid format spec' error.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: '%d' % repo.changelog.rev(node),
                'h': lambda: short(node),
                'm': lambda: re.sub('[^\w]', '_', desc or ''),
                'r': lambda: ('%d' % repo.changelog.rev(node)).zfill(
                    revwidth or 0),
            })
        if total is not None:
            expander['N'] = lambda: '%d' % total
        if seqno is not None:
            expander['n'] = lambda: '%d' % seqno
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total count
            expander['n'] = lambda: ('%d' % seqno).zfill(len('%d' % total))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        pieces = []
        i = 0
        end = len(pat)
        while i < end:
            c = pat[i:i + 1]
            if c == '%':
                # consume the escape character and expand it
                i += 1
                c = expander[pat[i:i + 1]]()
            pieces.append(c)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
867 867
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == '-'
871 871
872 872 class _unclosablefile(object):
873 873 def __init__(self, fp):
874 874 self._fp = fp
875 875
876 876 def close(self):
877 877 pass
878 878
879 879 def __iter__(self):
880 880 return iter(self._fp)
881 881
882 882 def __getattr__(self, attr):
883 883 return getattr(self._fp, attr)
884 884
885 885 def __enter__(self):
886 886 return self
887 887
888 888 def __exit__(self, exc_type, exc_value, exc_tb):
889 889 pass
890 890
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the output file described by %-pattern *pat* (see makefilename).

    A stdio pattern ('-' or empty) returns the ui's stdin/stdout wrapped
    in _unclosablefile so callers may close() it safely.  *modemap*, if
    given, overrides *mode* per resulting filename.
    """

    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        return _unclosablefile(fp)
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        # after the first 'wb' open, subsequent opens of the same file append
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
909 909
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    The target is chosen by the mutually exclusive --changelog /
    --manifest / --dir flags in *opts*; with none of them set, *file_*
    names either a tracked file (filelog) or an on-disk .i revlog file.
    Raises error.Abort for invalid flag combinations and
    error.CommandError when no target can be determined.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # fall back to opening the revlog index file directly from disk
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
954 954
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or move, when *rename* is True) the files matched by *pats*.

    *pats* holds the source patterns followed by the destination; *opts*
    carries the relevant command options ('after', 'dry_run', 'force', ...).
    Returns True if at least one copy/move failed, False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about (and skipping) unmanaged or removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy/move a single file; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # source and target differ only by case (e.g. a => A);
                # only a rename can record that
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        # i.e. (not after and exists) or (after and state in 'mn')
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after only records an already-performed copy/move
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            # actually perform the copy/move in the working directory
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name so the
                    # filesystem sees an actual rename
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            # NOTE: srcexists is only set on the '(not after and) not dryrun'
            # path above; the 'not after' / 'not dryrun' guards here keep the
            # other paths from reading it
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many already-copied targets exist for this
                    # candidate prefix length
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    # keep whichever prefix length matches more existing files
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    # last pattern is the destination; the rest are sources
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
1200 1200
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1221 1221
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag) tuple.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # no actual patch content was extracted
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        # NOTE: message may be None here (editor case); debug shows "None"
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply the patch to the working directory
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except error.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform,
                                             **pycompat.strkwargs(opts))
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    # a partial import must be committable even if empty
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            # --bypass: commit via an in-memory context, leaving the
            # working directory untouched
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except error.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.memctx(repo, (p1.node(), p2.node()),
                                        message,
                                        files=files,
                                        filectxfn=store,
                                        user=user,
                                        date=date,
                                        branch=branch,
                                        editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1392 1392
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header, or None;
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1400 1400
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    """Emit one changeset as an "HG changeset patch" through *write*.

    The header lines come first (user, date, branch, node, parents and
    any extension-supplied headers), then the description, then the diff
    of *ctx* against its (possibly switched) first parent.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    if switch_parent:
        # diff against the second parent instead of the first
        parents.reverse()
    prev = parents[0] if parents else nullid

    write("# HG changeset patch\n")
    write("# User %s\n" % ctx.user())
    write("# Date %d %d\n" % ctx.date())
    write("# %s\n" % util.datestr(ctx.date()))
    branch = ctx.branch()
    if branch and branch != 'default':
        write("# Branch %s\n" % branch)
    write("# Node ID %s\n" % hex(node))
    write("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        write("# Parent %s\n" % hex(parents[1]))

    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            write('# %s\n' % header)
    write(ctx.description().rstrip())
    write("\n\n")

    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1433 1433
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                            the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()

    An empty *revs* is a no-op (previously this raised ValueError from
    max() on an empty sequence).
    '''
    if not revs:
        # nothing to export; also guards the max() below
        return
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
                             total=total, seqno=seqno, revwidth=revwidth,
                             mode='wb', modemap=filemode)
            dest = fo.name
            def write(s, **kw):
                fo.write(s)
        try:
            if not dest.startswith('<'):
                repo.ui.note("%s\n" % dest)
            _exportsingle(
                repo, ctx, match, switch_parent, rev, seqno, write, opts)
        finally:
            # close the per-revision file even if _exportsingle fails,
            # so we don't leak file objects on error
            if fo is not None:
                fo.close()
1492 1492
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False, hunksfilterfn=None):
    '''show diff or diffstat.

    Writes the diff of node1..node2 (restricted by *match*) either to
    *fp* or, when fp is None, to the ui. With stat=True a diffstat is
    produced instead of a full diff. *root* restricts output to paths
    under that directory; listsubrepos additionally diffs subrepos.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # labels are dropped when writing to a plain file object
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs zero-context, prefixed hunks regardless of the
        # caller's options
        diffopts = diffopts.copy(context=0, noprefix=False)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts,
                            prefix=prefix, relroot=relroot,
                            hunksfilterfn=hunksfilterfn)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, opts=diffopts, prefix=prefix,
                                         relroot=relroot,
                                         hunksfilterfn=hunksfilterfn):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1551 1551
def _changesetlabels(ctx):
    """Return the space-separated ui labels used when displaying *ctx*."""
    parts = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
    if ctx.obsolete():
        parts.append('changeset.obsolete')
    if ctx.isunstable():
        parts.append('changeset.unstable')
        parts.extend('instability.%s' % i for i in ctx.instabilities())
    return ' '.join(parts)
1561 1561
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-rev buffers used in buffered mode (see show()/flush())
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None
        self._columns = templatekw.getlogcolumns()

    def flush(self, ctx):
        '''write any buffered header/hunk for ctx; returns 1 if a hunk
        was written, 0 otherwise'''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # avoid repeating an identical header back to back
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        '''write the footer, if any'''
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
             **props):
        '''show ctx, either immediately or into the per-rev buffer when
        buffering is enabled'''
        props = pycompat.byteskwargs(props)
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, hunksfilterfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, hunksfilterfn, props)

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: just the changeset id
            self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
                          label='log.node')
            return

        columns = self._columns
        self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            self.ui.write(columns['branch'] % branch, label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            mrev = self.repo.manifestlog._revlog.rev(mnode)
            self.ui.write(columns['manifest']
                          % scmutil.formatrevnode(self.ui, mrev, mnode),
                          label='ui.debug log.manifest')
        self.ui.write(columns['user'] % ctx.user(), label='log.user')
        self.ui.write(columns['date'] % util.datestr(ctx.date()),
                      label='log.date')

        if ctx.isunstable():
            instabilities = ctx.instabilities()
            self.ui.write(columns['instability'] % ', '.join(instabilities),
                          label='log.instability')

        elif ctx.obsolete():
            self._showobsfate(ctx)

        self._exthook(ctx)

        if self.ui.debugflag:
            # modified/added/removed relative to the first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip(['files', 'files+', 'files-'], files):
                if value:
                    self.ui.write(columns[key] % " ".join(value),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(columns['files'] % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(columns['copies'] % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(columns['extra'] % (key, util.escapestr(value)),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                self.ui.write(columns['summary'] % description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)

    def _showobsfate(self, ctx):
        '''write the obsolescence fate of ctx, if any'''
        obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)

        if obsfate:
            for obsfateline in obsfate:
                self.ui.write(self._columns['obsolete'] % obsfateline,
                              label='log.obsfate')

    def _exthook(self, ctx):
        '''empty method used by extensions as a hook point
        '''

    def showpatch(self, ctx, matchfn, hunksfilterfn=None):
        '''write the diffstat and/or diff of ctx against its first
        parent, according to the configured diff options'''
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True,
                               hunksfilterfn=hunksfilterfn)
            if diff:
                if stat:
                    # blank line between the diffstat and the diff
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False,
                               hunksfilterfn=hunksfilterfn)
            self.ui.write("\n")
1730 1730
1731 1731 class jsonchangeset(changeset_printer):
1732 1732 '''format changeset information.'''
1733 1733
    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        # reuse changeset_printer's setup (ui, repo, matcher, diff options)
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # whether no changeset has been emitted yet; drives how close()
        # terminates (or represents) the JSON list
        self._first = True
1738 1738
    def close(self):
        '''terminate the JSON output: close the open list, or emit an
        empty list if no changeset was ever shown'''
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")
1744 1744
1745 1745 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1746 1746 '''show a single changeset or file revision'''
1747 1747 rev = ctx.rev()
1748 1748 if rev is None:
1749 1749 jrev = jnode = 'null'
1750 1750 else:
1751 1751 jrev = '%d' % rev
1752 1752 jnode = '"%s"' % hex(ctx.node())
1753 1753 j = encoding.jsonescape
1754 1754
1755 1755 if self._first:
1756 1756 self.ui.write("[\n {")
1757 1757 self._first = False
1758 1758 else:
1759 1759 self.ui.write(",\n {")
1760 1760
1761 1761 if self.ui.quiet:
1762 1762 self.ui.write(('\n "rev": %s') % jrev)
1763 1763 self.ui.write((',\n "node": %s') % jnode)
1764 1764 self.ui.write('\n }')
1765 1765 return
1766 1766
1767 1767 self.ui.write(('\n "rev": %s') % jrev)
1768 1768 self.ui.write((',\n "node": %s') % jnode)
1769 1769 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1770 1770 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1771 1771 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1772 1772 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1773 1773 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1774 1774
1775 1775 self.ui.write((',\n "bookmarks": [%s]') %
1776 1776 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1777 1777 self.ui.write((',\n "tags": [%s]') %
1778 1778 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1779 1779 self.ui.write((',\n "parents": [%s]') %
1780 1780 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1781 1781
1782 1782 if self.ui.debugflag:
1783 1783 if rev is None:
1784 1784 jmanifestnode = 'null'
1785 1785 else:
1786 1786 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1787 1787 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1788 1788
1789 1789 self.ui.write((',\n "extra": {%s}') %
1790 1790 ", ".join('"%s": "%s"' % (j(k), j(v))
1791 1791 for k, v in ctx.extra().items()))
1792 1792
1793 1793 files = ctx.p1().status(ctx)
1794 1794 self.ui.write((',\n "modified": [%s]') %
1795 1795 ", ".join('"%s"' % j(f) for f in files[0]))
1796 1796 self.ui.write((',\n "added": [%s]') %
1797 1797 ", ".join('"%s"' % j(f) for f in files[1]))
1798 1798 self.ui.write((',\n "removed": [%s]') %
1799 1799 ", ".join('"%s"' % j(f) for f in files[2]))
1800 1800
1801 1801 elif self.ui.verbose:
1802 1802 self.ui.write((',\n "files": [%s]') %
1803 1803 ", ".join('"%s"' % j(f) for f in ctx.files()))
1804 1804
1805 1805 if copies:
1806 1806 self.ui.write((',\n "copies": {%s}') %
1807 1807 ", ".join('"%s": "%s"' % (j(k), j(v))
1808 1808 for k, v in copies))
1809 1809
1810 1810 matchfn = self.matchfn
1811 1811 if matchfn:
1812 1812 stat = self.diffopts.get('stat')
1813 1813 diff = self.diffopts.get('patch')
1814 1814 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1815 1815 node, prev = ctx.node(), ctx.p1().node()
1816 1816 if stat:
1817 1817 self.ui.pushbuffer()
1818 1818 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1819 1819 match=matchfn, stat=True)
1820 1820 self.ui.write((',\n "diffstat": "%s"')
1821 1821 % j(self.ui.popbuffer()))
1822 1822 if diff:
1823 1823 self.ui.pushbuffer()
1824 1824 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1825 1825 match=matchfn, stat=False)
1826 1826 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1827 1827
1828 1828 self.ui.write("\n }")
1829 1829
class changeset_templater(changeset_printer):
    '''format changeset information using a user-supplied template.

    Note: there are a variety of convenience functions to build a
    changeset_templater for common cases. See functions such as:
    makelogtemplater, show_changeset, buildcommittemplate, or other
    functions that use changesest_templater.
    '''

    # Arguments before "buffered" used to be positional. Consider not
    # adding/removing arguments before "buffered" to not break callers.
    def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
                 buffered=False):
        diffopts = diffopts or {}

        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.t = formatter.loadtemplater(ui, tmplspec,
                                         cache=templatekw.defaulttempl)
        # per-changeset index counter, exposed to templates as {index}
        self._counter = itertools.count()
        self.cache = {}

        # map of logical part name -> template name to render for it;
        # empty string means "no template for this part"
        self._tref = tmplspec.ref
        self._parts = {'header': '', 'footer': '',
                       tmplspec.ref: tmplspec.ref,
                       'docheader': '', 'docfooter': '',
                       'separator': ''}
        if tmplspec.mapfile:
            # find correct templates for current mode, for backward
            # compatibility with 'log -v/-q/--debug' using a mapfile;
            # later (more specific) modes override earlier ones
            tmplmodes = [
                (True, ''),
                (self.ui.verbose, '_verbose'),
                (self.ui.quiet, '_quiet'),
                (self.ui.debugflag, '_debug'),
            ]
            for mode, postfix in tmplmodes:
                for t in self._parts:
                    cur = t + postfix
                    if mode and cur in self.t:
                        self._parts[t] = cur
        else:
            partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
            m = formatter.templatepartsmap(tmplspec, self.t, partnames)
            self._parts.update(m)

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the document footer (if any) before the inherited close()
        # flushes buffered output
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['index'] = index = next(self._counter)
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache
        props = pycompat.strkwargs(props)

        # write separator, which wouldn't work well with the header part below
        # since there's inherently a conflict between header (across items) and
        # separator (per item)
        if self._parts['separator'] and index > 0:
            self.ui.write(templater.stringify(self.t(self._parts['separator'])))

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                # repeated identical headers are collapsed to a single one
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts[self._tref]
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1923 1922
def logtemplatespec(tmpl, mapfile):
    """Wrap a literal template or a mapfile path in a formatter.templatespec.

    A mapfile takes precedence and is registered under the 'changeset'
    topic; otherwise the literal template is used with no topic.
    """
    if not mapfile:
        return formatter.templatespec('', tmpl, None)
    return formatter.templatespec('changeset', tmpl, mapfile)
1929 1928
def _lookuplogtemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    Resolution order: explicit template, then explicit style, then the
    [ui] logtemplate setting, then the [ui] style setting. Returns a
    templatespec; an empty spec means "use the default display".

    See formatter.lookuptemplate() for details.
    """

    # ui settings
    if not tmpl and not style: # template are stronger than style
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            return logtemplatespec(templater.unquotestring(tmpl), None)
        else:
            style = util.expandpath(ui.config('ui', 'style'))

    if not tmpl and style:
        mapfile = style
        # a bare name (no directory part) is looked up as a shipped
        # "map-cmdline.<name>" style before being tried as a plain path
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname
        return logtemplatespec(None, mapfile)

    if not tmpl:
        # neither template nor style: empty spec, caller falls back to
        # the plain changeset_printer
        return logtemplatespec(None, None)

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1957 1956
def makelogtemplater(ui, repo, tmpl, buffered=False):
    """Create a changeset_templater from a literal template 'tmpl'
    byte-string."""
    return changeset_templater(ui, repo, logtemplatespec(tmpl, None),
                               buffered=buffered)
1963 1962
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a match object is only needed when a patch/diffstat will be shown
    match = None
    if opts.get('patch') or opts.get('stat'):
        match = scmutil.matchall(repo)

    # 'json' is handled by a dedicated printer rather than a template
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, match, opts, buffered)

    spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
    if spec.ref or spec.tmpl or spec.mapfile:
        return changeset_templater(ui, repo, spec, match, opts, buffered)

    # completely empty spec: fall back to the regular display
    return changeset_printer(ui, repo, match, opts, buffered)
1989 1988
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    Writes one marker per line through formatter 'fm':
    [index] prednode succnodes flag [parentnodes] (date) {metadata}

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    # successors may be empty (pruned changeset); only written when present
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    meta = marker.metadata().copy()
    # the date is already displayed separately above; drop it from metadata
    meta.pop('date', None)
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
2010 2009
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    # record every walked revision whose commit date satisfies the spec
    def record(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    for ctx in walkchangerevs(repo, matcher, {'rev': None}, record):
        rev = ctx.rev()
        if rev not in matched:
            continue
        ui.status(_("found revision %s from %s\n")
                  % (rev, util.datestr(matched[rev])))
        return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
2031 2030
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield revision-window sizes, doubling each step while strictly
    below sizelimit, then repeating the reached size forever."""
    while windowsize < sizelimit:
        yield windowsize
        windowsize *= 2
    while True:
        yield windowsize
2037 2036
class FileWalkError(Exception):
    # Raised by walkfilerevs() when the requested file history cannot be
    # determined from filelogs alone; callers catch it and fall back to
    # the slow changelog-scanning path.
    pass
2040 2039
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    'fncache' (rev -> [filename]) is filled in as a side effect for every
    returned rev. When 'follow' is true, copy/rename sources are walked
    too.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) pairs; copy sources found
        # during the main loop below are appended to 'copies' and are
        # picked up here because the list is shared
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2137 2136
class _followfilter(object):
    """Stateful predicate selecting revisions related to the first
    revision passed to match().

    Revisions above the start revision are accepted when they descend
    from an already-accepted revision; revisions below it are accepted
    when they are (transitive) parents of one. The caller must feed
    revisions to match() in a monotonic order away from the start for
    the 'roots' bookkeeping to stay correct.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # nullrev means "start revision not yet seen"
        self.startrev = nullrev
        self.roots = set()
        # onlyfirst limits ancestry tracking to first parents (--follow-first)
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # the first queried revision becomes the anchor and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                # a matched rev is replaced by its parents in 'roots' so
                # the set tracks the frontier of the walk
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2175 2174
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # patterns or --removed force the slow path: file-level filtering
    # cannot be answered from filelogs alone
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # forward pass over the window: call prepare() in ascending order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # then yield the window in the caller's requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2312 2311
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating revs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # single-element list used as a mutable "already populated" flag so
    # the nested filematcher() closure can rebind it (py2 has no nonlocal)
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # map every ancestor revision of each followed file to the name(s)
        # the file had at that revision
        for fn in files:
            fctx = pctx[fn]
            fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
            for c in fctx.ancestors(followfirst=followfirst):
                fcache.setdefault(c.rev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
2340 2339
2341 2340 def _makenofollowlogfilematcher(repo, pats, opts):
2342 2341 '''hook for extensions to override the filematcher for non-follow cases'''
2343 2342 return None
2344 2343
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # log option name -> (revset expression template, join operator for
    # list-valued options)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # translate the accumulated option values into one revset expression
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2495 2494
def _logrevs(repo, opts):
    """Return the default set of revisions for log-like commands."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if follow and repo.dirstate.p1() == nullid:
        # --follow on an empty working directory parent: nothing to show
        return smartset.baseset()
    if follow:
        return repo.revs('reverse(:.)')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
2510 2509
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr)
        revs = matcher(repo, revs)
    if limit is not None:
        # apply --limit lazily: 'revs' may be an expensive lazy smartset,
        # so stop iterating as soon as 'limit' revisions are collected
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = smartset.baseset(limitedrevs)

    return revs, expr, filematcher
2541 2540
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    # kept structurally in sync with getgraphlogrevs() (which additionally
    # re-sorts user-specified revs); the empty-baseset spelling, loop
    # variable and limit comparison now match that sibling
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr)
        revs = matcher(repo, revs)
    if limit is not None:
        # apply --limit lazily: 'revs' may be an expensive lazy smartset,
        # so stop iterating as soon as 'limit' revisions are collected
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = smartset.baseset(limitedrevs)

    return revs, expr, filematcher
2567 2566
def _parselinerangelogopt(repo, opts):
    """Parse --line-range log option and return a list of tuples (filename,
    (fromline, toline)).
    """
    result = []
    for pat in opts.get('line_range', []):
        # each entry looks like "FILEPATTERN,FROMLINE:TOLINE"
        try:
            pat, rangespec = pat.rsplit(',', 1)
        except ValueError:
            raise error.Abort(_('malformatted line-range pattern %s') % pat)
        try:
            fromline, toline = map(int, rangespec.split(':'))
        except ValueError:
            raise error.Abort(_("invalid line range for %s") % pat)
        msg = _("line range pattern '%s' must match exactly one file") % pat
        fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
        result.append((fname, util.processlinerange(fromline, toline)))
    return result
2587 2586
def getloglinerangerevs(repo, userrevs, opts):
    """Return (revs, filematcher, hunksfilter).

    "revs" are revisions obtained by processing "line-range" log options and
    walking block ancestors of each specified file/line-range.

    "filematcher(rev) -> match" is a factory function returning a match object
    for a given revision for file patterns specified in --line-range option.
    If neither --stat nor --patch options are passed, "filematcher" is None.

    "hunksfilter(rev) -> filterfn(fctx, hunks)" is a factory function
    returning a hunks filtering function.
    If neither --stat nor --patch options are passed, "filterhunks" is None.
    """
    wctx = repo[None]

    # Two-levels map of "rev -> file ctx -> [line range]".
    linerangesbyrev = {}
    for fname, (fromline, toline) in _parselinerangelogopt(repo, opts):
        if fname not in wctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % fname)
        fctx = wctx.filectx(fname)
        for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
            rev = fctx.introrev()
            # drop block ancestors outside the revisions the user asked for
            if rev not in userrevs:
                continue
            linerangesbyrev.setdefault(
                rev, {}).setdefault(
                    fctx.path(), []).append(linerange)

    filematcher = None
    hunksfilter = None
    if opts.get('patch') or opts.get('stat'):

        # identity filter used for revisions with no recorded line ranges
        def nofilterhunksfn(fctx, hunks):
            return hunks

        # rebinds the 'hunksfilter = None' above with a real factory
        def hunksfilter(rev):
            fctxlineranges = linerangesbyrev.get(rev)
            if fctxlineranges is None:
                return nofilterhunksfn

            def filterfn(fctx, hunks):
                lineranges = fctxlineranges.get(fctx.path())
                if lineranges is not None:
                    # keep only hunks overlapping a followed line range;
                    # binary hunks (hr is None) are always kept
                    for hr, lines in hunks:
                        if hr is None: # binary
                            yield hr, lines
                            continue
                        if any(mdiff.hunkinrange(hr[2:], lr)
                               for lr in lineranges):
                            yield hr, lines
                else:
                    for hunk in hunks:
                        yield hunk

            return filterfn

        # rebinds the 'filematcher = None' above with a real factory
        def filematcher(rev):
            files = list(linerangesbyrev.get(rev, []))
            return scmutil.matchfiles(repo, files)

    revs = sorted(linerangesbyrev, reverse=True)

    return revs, filematcher, hunksfilter
2654 2653
def _graphnodeformatter(ui, displayer):
    """Return a formatnode(repo, ctx) callable that renders the graph node
    character for ctx, honoring the ui.graphnodetemplate config option."""
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode # fast path for "{graphnode}"

    spec = templater.unquotestring(spec)
    templ = formatter.maketemplater(ui, spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache # reuse cache of slow templates
    props = templatekw.keywords.copy()
    # NOTE(review): the surrounding changeset context marks this 'templ'
    # binding as removed upstream ("templater: keep default resources per
    # template engine") -- confirm which side of that change this file
    # should carry before relying on props['templ'].
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        # per-node properties; revcache is reset for every node rendered
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        return templ.render(props)
    return formatnode
2675 2673
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None, props=None):
    """Render the revisions yielded by *dag* as an ASCII graph.

    *edgefn* maps a (type, char, state, rev, parents) tuple to edge data;
    *getrenamed*, when given, is used to collect copy/rename information;
    *filematcher(rev)*, when given, restricts what the displayer shows.
    """
    props = props or {}
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for the null/working revision; skip rename
        # lookup there
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        edges = edgefn(type, char, state, rev, parents)
        # the first edge carries the graph width needed by the displayer
        firstedge = next(edges)
        width = firstedge[2]
        displayer.show(ctx, copies=copies, matchfn=revmatchfn,
                       _graphwidth=width, **pycompat.strkwargs(props))
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        # only the first ascii() call consumes 'lines'; subsequent edges
        # are drawn with an empty text block
        for type, char, width, coldata in itertools.chain([firstedge], edges):
            graphmod.ascii(ui, state, type, char, lines, coldata)
            lines = []
    displayer.close()
2728 2726
def graphlog(ui, repo, pats, opts):
    """Run log with the -G/--graph option.

    Parameters are identical to the log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        revspec = opts.get('rev')
        if revspec:
            endrev = scmutil.revrange(repo, revspec).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges,
                 getrenamed, filematcher)
2745 2743
def checkunsupportedgraphflags(pats, opts):
    """Abort if any log option incompatible with -G/--graph is enabled."""
    for flag in ("newest_first",):
        if opts.get(flag):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % flag.replace("_", "-"))
2751 2749
def graphrevs(repo, nodes, opts):
    """Reverse *nodes* in place, honor the --limit option, and return a
    graphmod walker over the (possibly truncated) node list."""
    nodes.reverse()
    maxcount = loglimit(opts)
    if maxcount is not None:
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
2758 2756
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Add files matched by *match* to the dirstate (and to subrepos).

    Returns the list of files that could not be added.  With *explicitonly*,
    only files listed exactly on the command line are considered.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # record files the matcher reports as bad while still delegating to it
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for filename collisions on case-insensitive systems
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2801 2799
def addwebdirpath(repo, serverpath, webconf):
    """Register *repo* (and, recursively, its subrepositories) under
    *serverpath* in the hgweb configuration mapping *webconf*."""
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug('adding %s = %s\n' % (serverpath, root))

    # recurse into subrepos of every revision that touched .hgsub
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2810 2808
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by *match* without deleting them.

    Returns a (bad, forgot) pair: files that could not be forgotten and
    files that were.  With *explicitonly*, only exact command-line names
    are considered.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # record files the matcher reports as bad while still delegating to it
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicit names that are neither tracked nor directories
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2858 2856
def files(ui, ctx, m, fm, fmt, subrepos):
    """Write the files of *ctx* matched by *m* through formatter *fm*.

    Returns 0 if at least one file was listed, 1 otherwise.  With
    *subrepos*, recurse into subrepositories.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working directory (rev is None) skip removed files
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2888 2886
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Implement 'hg remove': untrack (and unless *after*, delete) matched
    files, recursing into subrepos when *subrepos* is set.

    Returns 0 on success, 1 if any file was skipped with a warning.  When
    *warnings* is passed in (recursive subrepo calls), warnings are
    accumulated there and printed by the outermost caller only.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        # closure over the current f: is this path inside a subrepo?
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only untrack files already deleted from the working dir
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % m.rel(f))
                ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
3007 3005
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Implement 'hg cat': write data of matched files in *ctx* through
    formatter *basefm* (or to files named by *fntemplate*).

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename, opts) as fm:
            data = ctx[path].data()
            if opts.get('decode'):
                data = repo.wwritedata(path, data)
            fm.startitem()
            fm.write('data', '%s', data)
            fm.data(abspath=path, path=matcher.rel(path))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            # fall through to the generic walk below
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path),
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
3061 3059
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        # guard the dirstate so a failed addremove+commit can be rolled back
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard:
            if scmutil.addremove(repo, matcher, "", opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)
3082 3080
def samefile(f, ctx1, ctx2):
    """Report whether file *f* is identical (content and flags) in the two
    changectxs; absence from both sides also counts as "same"."""
    if f not in ctx1.manifest():
        # same only when the file is missing from both contexts
        return f not in ctx2.manifest()
    a = ctx1.filectx(f)
    if f not in ctx2.manifest():
        return False
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
3094 3092
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset *old* with the changes currently in the working
    copy (restricted by *pats*/*opts*) and return the new node id, or
    old.node() if nothing would change.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction('amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        user = opts.get('user') or old.user()
        date = opts.get('date') or old.date()

        # Parse the date to allow comparison between date and old.date()
        date = util.parsedate(date)

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            files = set([fn for st in repo.status(base, old)[:3]
                         for fn in st])
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        if (opts.get('addremove')
            and scmutil.addremove(repo, matcher, "", opts)):
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if '.hgsub' in wctx or '.hgsub' in old:
            from . import subrepo # avoid cycle: cmdutil -> subrepo -> cmdutil
            subs, commitsubs, newsubstate = subrepo.precommit(
                ui, wctx, wctx._status, matcher)
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepo.writestate(repo, newsubstate)

        filestoamend = set(f for f in wctx.files() if matcher(f))

        changes = (len(filestoamend) > 0)
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # Bug fix: this used to test "if old.p2:", but p2 is a bound
            # method and therefore always truthy, so the second-parent copy
            # data was recomputed even for non-merge changesets.  Call it:
            # a changectx is falsy when its revision is the null revision.
            if old.p2():
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [f for f in files if (not samefile(f, wctx, base)
                                          or f in wctx.deleted())]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(repo, ctx_,
                                              fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copied=copied.get(path))
                    return mctx
                except KeyError:
                    return None
        else:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, 'commit.amend')
        editor = getcommiteditor(editform=editform,
                                 **pycompat.strkwargs(opts))

        if not message:
            editor = getcommiteditor(edit=True, editform=editform)
            message = old.description()

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not changes)
            and newdesc == old.description()
            and user == old.user()
            and date == old.date()
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This is not what we expect from amend.
            return old.node()

        if opts.get('secret'):
            commitphase = 'secret'
        else:
            commitphase = old.phase()
        overrides = {('phases', 'new-commit'): commitphase}
        with ui.configoverride(overrides, 'amend'):
            newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get('note'):
            obsmetadata = {'note': opts['note']}
        scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and
        # and modified in the amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3287 3285
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's existing description, or open the commit editor."""
    description = ctx.description()
    if description:
        return description
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
3293 3291
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Open the user's editor on a commit-message template for *ctx* and
    return the resulting text.

    Raises error.Abort on an empty message, or (with
    *unchangedmessagedetection*) when the template text was not edited.
    *finishdesc*, if given, post-processes the edited text.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # look up the most specific 'committemplate' config, from
    # 'changeset.<editform parts>' down to plain 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path, action='commit')
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop all remaining "HG:" helper lines
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
3343 3341
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the 'committemplate' template named *ref* for *ctx* and
    return the resulting commit-editor text."""
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    t = changeset_templater(ui, repo, spec, None, {}, False)
    # overlay all [committemplate] config entries onto the template cache
    entries = ((k, templater.unquotestring(v))
               for k, v in repo.ui.configitems('committemplate'))
    t.t.cache.update(entries)

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3357 3355
def hgprefix(msg):
    """Prefix every non-empty line of *msg* with "HG: " and rejoin them."""
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: %s" % line)
    return "\n".join(prefixed)
3360 3358
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit-editor text for *ctx*:
    the current description followed by "HG:" helper lines summarizing
    user, branch, bookmark, subrepos and changed files."""
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(hgprefix(_("Enter commit message."
                      " Lines beginning with 'HG:' are removed.")))
    edittext.append(hgprefix(extramsg))
    edittext.append("HG: --")
    edittext.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_("branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
    edittext.extend([hgprefix(_("added %s") % f) for f in added])
    edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
    edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
    if not added and not modified and not removed:
        edittext.append(hgprefix(_("no files changed")))
    edittext.append("")

    return "\n".join(edittext)
3388 3386
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print post-commit status messages for the new changeset *node*:
    "created new head", "reopening closed branch head", and (in verbose or
    debug mode) the committed changeset id."""
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3436 3434
def postcommitstatus(repo, pats, opts):
    '''Return the working-directory status restricted to the given
    patterns, for display after a commit.'''
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
3439 3437
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Revert files in the working directory to their state in ctx.

    parents is the (parent, p2) pair of the working directory parents;
    pats/opts select which files to revert. This function computes, for
    every matched file, which of several disjoint "state" sets it belongs
    to, then dispatches each file to an action (revert/add/remove/forget/
    undelete/...) with an associated backup strategy, and finally hands
    the computed actions to _performrevert(). Subrepos on the target list
    are reverted recursively at the end.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already accounted for, for
                # subrepos, and for directories that contain matched files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all files in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinguish between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between files to forget and the others
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, <message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constants" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set, to avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets whose results will change files on disk
            # Modified compared to target, no local change
            (modified,      actions['revert'],   discard),
            # Modified compared to target, but local file is deleted
            (deleted,       actions['revert'],   discard),
            # Modified compared to target, local change
            (dsmodified,    actions['revert'],   dsmodifiedbackup),
            # Added since target
            (added,         actions['remove'],   discard),
            # Added in working directory
            (dsadded,       actions['forget'],   discard),
            # Added since target, have local modification
            (modadded,      backupanddel,        backup),
            # Added since target but file is missing in working directory
            (deladded,      actions['drop'],     discard),
            # Removed since target, before working copy parent
            (removed,       actions['add'],      discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk,      actions['add'],      check),
            # Removed since target, marked as such in working copy parent
            (dsremoved,     actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk,    actions['undelete'], check),
            ## the following sets do not result in any file changes
            # File with no modification
            (clean,         actions['noop'],     discard),
            # Existing file, not tracked anywhere
            (unknown,       actions['unknown'],  discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touched on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, bytes):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats,
                                         **pycompat.strkwargs(opts))
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3724 3722
def _revertprefetch(repo, ctx, *files):
    """Let extension changing the storage layer prefetch content

    Deliberately a no-op here; revert() calls it with the lists of files
    it is about to need so that extensions may wrap it and fetch content
    ahead of time.
    """
3727 3725
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """Actually perform all the actions computed for revert.

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this
    function.

    actions maps action names ('forget', 'remove', 'drop', 'revert', 'add',
    'undelete') to ([files], message) pairs as built by revert(). In
    interactive mode the 'revert' entries are turned into a patch the user
    can filter hunk-by-hunk; tobackup holds files needing an .orig backup
    before their hunks are applied.
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []
    # files the user declines to touch interactively are excluded from the
    # patch-based revert below via this matcher option
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write f's content (and flags) from the target context
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # best-effort unlink, then mark removed in the dirstate
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'apply'
            reversehunks = False
        # when discarding against the dirstate parent, show the forward
        # diff and reverse the selected hunks before applying
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    # restore copy/rename metadata for every file we rewrote
    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3864 3862
class command(registrar.command):
    """deprecated: use registrar.command instead

    Kept as a shim; registering through this class tags the function so
    extensions.py can emit a deprecation warning.
    """
    def _doregister(self, func, name, *args, **kwargs):
        func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
        return super(command, self)._doregister(func, name, *args, **kwargs)
3870 3868
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl,   destbranch,   destpeer,   outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3899 3897
def checkunfinished(repo, commit=False):
    '''Abort if a multistep operation (like graft) is in progress.

    Probably good to call right before bailifchanged(). When commit=True,
    states that explicitly allow committing are not treated as blockers.
    '''
    for statefile, _clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3910 3908
def clearunfinished(repo):
    '''Abort on any unclearable unfinished state, then delete the state
    files of the clearable ones.
    '''
    # first pass: refuse to proceed if a non-clearable state exists
    for statefile, clearable, _allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: wipe the clearable state files
    for statefile, clearable, _allowcommit, _msg, _hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3921 3919
# (.hg/<state file>, command to finish the operation) pairs consulted by
# howtocontinue() once 'hg resolve' is done
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3926 3924
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates maps a .hg/{file} to the command that completes
    the corresponding operation.

    Returns a (msg, warning) pair: msg is a "continue: ..." string (or
    None when nothing is pending) and warning is a boolean.
    '''
    contmsg = _("continue: %s")
    pending = next((cmd for statefile, cmd in afterresolvedstates
                    if repo.vfs.exists(statefile)), None)
    if pending is not None:
        return contmsg % pending, True
    if repo[None].dirty(missing=True, merge=False, branch=False):
        return contmsg % _("hg commit"), False
    return None, None
3944 3942
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve.

    If howtocontinue() found a matching afterresolvedstates entry, the
    message goes through repo.ui.warn; otherwise through repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3959 3957
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task; when that task only
    warrants a note (e.g. plain 'hg commit'), no hint is attached.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,531 +1,531 b''
1 1 # formatter.py - generic output formatting for mercurial
2 2 #
3 3 # Copyright 2012 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Generic output formatting for Mercurial
9 9
10 10 The formatter provides API to show data in various ways. The following
11 11 functions should be used in place of ui.write():
12 12
13 13 - fm.write() for unconditional output
14 14 - fm.condwrite() to show some extra data conditionally in plain output
15 15 - fm.context() to provide changectx to template output
16 16 - fm.data() to provide extra data to JSON or template output
17 17 - fm.plain() to show raw text that isn't provided to JSON or template output
18 18
19 19 To show structured data (e.g. date tuples, dicts, lists), apply fm.format*()
20 20 beforehand so the data is converted to the appropriate data type. Use
21 21 fm.isplain() if you need to convert or format data conditionally which isn't
22 22 supported by the formatter API.
23 23
24 24 To build nested structure (i.e. a list of dicts), use fm.nested().
25 25
26 26 See also https://www.mercurial-scm.org/wiki/GenericTemplatingPlan
27 27
28 28 fm.condwrite() vs 'if cond:':
29 29
30 30 In most cases, use fm.condwrite() so users can selectively show the data
31 31 in template output. If it's costly to build data, use plain 'if cond:' with
32 32 fm.write().
33 33
34 34 fm.nested() vs fm.formatdict() (or fm.formatlist()):
35 35
36 36 fm.nested() should be used to form a tree structure (a list of dicts of
37 37 lists of dicts...) which can be accessed through template keywords, e.g.
38 38 "{foo % "{bar % {...}} {baz % {...}}"}". On the other hand, fm.formatdict()
39 39 exports a dict-type object to template, which can be accessed by e.g.
40 40 "{get(foo, key)}" function.
41 41
42 42 Doctest helper:
43 43
44 44 >>> def show(fn, verbose=False, **opts):
45 45 ... import sys
46 46 ... from . import ui as uimod
47 47 ... ui = uimod.ui()
48 48 ... ui.verbose = verbose
49 49 ... ui.pushbuffer()
50 50 ... try:
51 51 ... return fn(ui, ui.formatter(pycompat.sysbytes(fn.__name__),
52 52 ... pycompat.byteskwargs(opts)))
53 53 ... finally:
54 54 ... print(pycompat.sysstr(ui.popbuffer()), end='')
55 55
56 56 Basic example:
57 57
58 58 >>> def files(ui, fm):
59 59 ... files = [(b'foo', 123, (0, 0)), (b'bar', 456, (1, 0))]
60 60 ... for f in files:
61 61 ... fm.startitem()
62 62 ... fm.write(b'path', b'%s', f[0])
63 63 ... fm.condwrite(ui.verbose, b'date', b' %s',
64 64 ... fm.formatdate(f[2], b'%Y-%m-%d %H:%M:%S'))
65 65 ... fm.data(size=f[1])
66 66 ... fm.plain(b'\\n')
67 67 ... fm.end()
68 68 >>> show(files)
69 69 foo
70 70 bar
71 71 >>> show(files, verbose=True)
72 72 foo 1970-01-01 00:00:00
73 73 bar 1970-01-01 00:00:01
74 74 >>> show(files, template=b'json')
75 75 [
76 76 {
77 77 "date": [0, 0],
78 78 "path": "foo",
79 79 "size": 123
80 80 },
81 81 {
82 82 "date": [1, 0],
83 83 "path": "bar",
84 84 "size": 456
85 85 }
86 86 ]
87 87 >>> show(files, template=b'path: {path}\\ndate: {date|rfc3339date}\\n')
88 88 path: foo
89 89 date: 1970-01-01T00:00:00+00:00
90 90 path: bar
91 91 date: 1970-01-01T00:00:01+00:00
92 92
93 93 Nested example:
94 94
95 95 >>> def subrepos(ui, fm):
96 96 ... fm.startitem()
97 97 ... fm.write(b'repo', b'[%s]\\n', b'baz')
98 98 ... files(ui, fm.nested(b'files'))
99 99 ... fm.end()
100 100 >>> show(subrepos)
101 101 [baz]
102 102 foo
103 103 bar
104 104 >>> show(subrepos, template=b'{repo}: {join(files % "{path}", ", ")}\\n')
105 105 baz: foo, bar
106 106 """
107 107
108 108 from __future__ import absolute_import, print_function
109 109
110 110 import collections
111 111 import contextlib
112 112 import itertools
113 113 import os
114 114
115 115 from .i18n import _
116 116 from .node import (
117 117 hex,
118 118 short,
119 119 )
120 120
121 121 from . import (
122 122 error,
123 123 pycompat,
124 124 templatefilters,
125 125 templatekw,
126 126 templater,
127 127 util,
128 128 )
129 129
130 130 pickle = util.pickle
131 131
132 132 class _nullconverter(object):
133 133 '''convert non-primitive data types to be processed by formatter'''
134 134
135 135 # set to True if context object should be stored as item
136 136 storecontext = False
137 137
138 138 @staticmethod
139 139 def formatdate(date, fmt):
140 140 '''convert date tuple to appropriate format'''
141 141 return date
142 142 @staticmethod
143 143 def formatdict(data, key, value, fmt, sep):
144 144 '''convert dict or key-value pairs to appropriate dict format'''
145 145 # use plain dict instead of util.sortdict so that data can be
146 146 # serialized as a builtin dict in pickle output
147 147 return dict(data)
148 148 @staticmethod
149 149 def formatlist(data, name, fmt, sep):
150 150 '''convert iterable to appropriate list format'''
151 151 return list(data)
152 152
class baseformatter(object):
    '''Common machinery for all formatters.

    Collects one dict per startitem() call into self._item and hands the
    finished dict to _showitem() when the next item starts or at end().
    Subclasses override _showitem()/plain()/etc. to render the data.
    '''
    def __init__(self, ui, topic, opts, converter):
        self._ui = ui
        self._topic = topic
        self._style = opts.get("style")
        self._template = opts.get("template")
        self._converter = converter
        # the dict for the item currently being built (None outside items)
        self._item = None
        # function to convert node to string suitable for this output
        self.hexfunc = hex
    def __enter__(self):
        return self
    def __exit__(self, exctype, excvalue, traceback):
        # only finalize output on a clean exit; an exception mid-item
        # would otherwise flush a half-built item
        if exctype is None:
            self.end()
    def _showitem(self):
        '''show a formatted item once all data is collected'''
    def startitem(self):
        '''begin an item in the format list'''
        if self._item is not None:
            self._showitem()
        self._item = {}
    def formatdate(self, date, fmt='%a %b %d %H:%M:%S %Y %1%2'):
        '''convert date tuple to appropriate format'''
        return self._converter.formatdate(date, fmt)
    def formatdict(self, data, key='key', value='value', fmt='%s=%s', sep=' '):
        '''convert dict or key-value pairs to appropriate dict format'''
        return self._converter.formatdict(data, key, value, fmt, sep)
    def formatlist(self, data, name, fmt='%s', sep=' '):
        '''convert iterable to appropriate list format'''
        # name is mandatory argument for now, but it could be optional if
        # we have default template keyword, e.g. {item}
        return self._converter.formatlist(data, name, fmt, sep)
    def context(self, **ctxs):
        '''insert context objects to be used to render template keywords'''
        ctxs = pycompat.byteskwargs(ctxs)
        assert all(k == 'ctx' for k in ctxs)
        # only converters that render templates keep the ctx on the item
        if self._converter.storecontext:
            self._item.update(ctxs)
    def data(self, **data):
        '''insert data into item that's not shown in default output'''
        data = pycompat.byteskwargs(data)
        self._item.update(data)
    def write(self, fields, deftext, *fielddata, **opts):
        '''do default text output while assigning data to item'''
        fieldkeys = fields.split()
        assert len(fieldkeys) == len(fielddata)
        self._item.update(zip(fieldkeys, fielddata))
    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
        '''do conditional write (primarily for plain formatter)'''
        # note: the data is stored on the item regardless of cond, so
        # structured output always carries it
        fieldkeys = fields.split()
        assert len(fieldkeys) == len(fielddata)
        self._item.update(zip(fieldkeys, fielddata))
    def plain(self, text, **opts):
        '''show raw text for non-templated mode'''
    def isplain(self):
        '''check for plain formatter usage'''
        return False
    def nested(self, field):
        '''sub formatter to store nested data in the specified field'''
        self._item[field] = data = []
        return _nestedformatter(self._ui, self._converter, data)
    def end(self):
        '''end output for the formatter'''
        if self._item is not None:
            self._showitem()
218 218 self._showitem()
219 219
def nullformatter(ui, topic):
    '''build a formatter that swallows all output'''
    # the bare base class shows nothing and the null converter keeps
    # the data untouched
    return baseformatter(ui, topic, opts={}, converter=_nullconverter)
223 223
class _nestedformatter(baseformatter):
    '''build sub items and store them in the parent formatter'''
    def __init__(self, ui, converter, data):
        baseformatter.__init__(self, ui, topic='', opts={}, converter=converter)
        # the parent's list that finished sub-items are appended to
        self._data = data
    def _showitem(self):
        self._data.append(self._item)
231 231
232 232 def _iteritems(data):
233 233 '''iterate key-value pairs in stable order'''
234 234 if isinstance(data, dict):
235 235 return sorted(data.iteritems())
236 236 return data
237 237
238 238 class _plainconverter(object):
239 239 '''convert non-primitive data types to text'''
240 240
241 241 storecontext = False
242 242
243 243 @staticmethod
244 244 def formatdate(date, fmt):
245 245 '''stringify date tuple in the given format'''
246 246 return util.datestr(date, fmt)
247 247 @staticmethod
248 248 def formatdict(data, key, value, fmt, sep):
249 249 '''stringify key-value pairs separated by sep'''
250 250 return sep.join(fmt % (k, v) for k, v in _iteritems(data))
251 251 @staticmethod
252 252 def formatlist(data, name, fmt, sep):
253 253 '''stringify iterable separated by sep'''
254 254 return sep.join(fmt % e for e in data)
255 255
class plainformatter(baseformatter):
    '''the default text output scheme'''
    def __init__(self, ui, out, topic, opts):
        baseformatter.__init__(self, ui, topic, opts, _plainconverter)
        # full hashes in debug mode, short ones otherwise
        self.hexfunc = hex if ui.debugflag else short
        if ui is out:
            self._write = ui.write
        else:
            self._write = lambda s, **opts: out.write(s)
    def startitem(self):
        # plain output has no per-item state
        pass
    def data(self, **data):
        # extra data is only meaningful for structured output
        pass
    def write(self, fields, deftext, *fielddata, **opts):
        self._write(deftext % fielddata, **opts)
    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
        '''write only when cond is true'''
        if not cond:
            return
        self._write(deftext % fielddata, **opts)
    def plain(self, text, **opts):
        self._write(text, **opts)
    def isplain(self):
        return True
    def nested(self, field):
        # nested data is written straight through to the same stream
        return self
    def end(self):
        pass
287 287
288 288 class debugformatter(baseformatter):
289 289 def __init__(self, ui, out, topic, opts):
290 290 baseformatter.__init__(self, ui, topic, opts, _nullconverter)
291 291 self._out = out
292 292 self._out.write("%s = [\n" % self._topic)
293 293 def _showitem(self):
294 294 self._out.write(" " + repr(self._item) + ",\n")
295 295 def end(self):
296 296 baseformatter.end(self)
297 297 self._out.write("]\n")
298 298
299 299 class pickleformatter(baseformatter):
300 300 def __init__(self, ui, out, topic, opts):
301 301 baseformatter.__init__(self, ui, topic, opts, _nullconverter)
302 302 self._out = out
303 303 self._data = []
304 304 def _showitem(self):
305 305 self._data.append(self._item)
306 306 def end(self):
307 307 baseformatter.end(self)
308 308 self._out.write(pickle.dumps(self._data))
309 309
310 310 class jsonformatter(baseformatter):
311 311 def __init__(self, ui, out, topic, opts):
312 312 baseformatter.__init__(self, ui, topic, opts, _nullconverter)
313 313 self._out = out
314 314 self._out.write("[")
315 315 self._first = True
316 316 def _showitem(self):
317 317 if self._first:
318 318 self._first = False
319 319 else:
320 320 self._out.write(",")
321 321
322 322 self._out.write("\n {\n")
323 323 first = True
324 324 for k, v in sorted(self._item.items()):
325 325 if first:
326 326 first = False
327 327 else:
328 328 self._out.write(",\n")
329 329 u = templatefilters.json(v, paranoid=False)
330 330 self._out.write(' "%s": %s' % (k, u))
331 331 self._out.write("\n }")
332 332 def end(self):
333 333 baseformatter.end(self)
334 334 self._out.write("\n]\n")
335 335
336 336 class _templateconverter(object):
337 337 '''convert non-primitive data types to be processed by templater'''
338 338
339 339 storecontext = True
340 340
341 341 @staticmethod
342 342 def formatdate(date, fmt):
343 343 '''return date tuple'''
344 344 return date
345 345 @staticmethod
346 346 def formatdict(data, key, value, fmt, sep):
347 347 '''build object that can be evaluated as either plain string or dict'''
348 348 data = util.sortdict(_iteritems(data))
349 349 def f():
350 350 yield _plainconverter.formatdict(data, key, value, fmt, sep)
351 351 return templatekw.hybriddict(data, key=key, value=value, fmt=fmt, gen=f)
352 352 @staticmethod
353 353 def formatlist(data, name, fmt, sep):
354 354 '''build object that can be evaluated as either plain string or list'''
355 355 data = list(data)
356 356 def f():
357 357 yield _plainconverter.formatlist(data, name, fmt, sep)
358 358 return templatekw.hybridlist(data, name=name, fmt=fmt, gen=f)
359 359
360 360 class templateformatter(baseformatter):
361 361 def __init__(self, ui, out, topic, opts):
362 362 baseformatter.__init__(self, ui, topic, opts, _templateconverter)
363 363 self._out = out
364 364 spec = lookuptemplate(ui, topic, opts.get('template', ''))
365 365 self._tref = spec.ref
366 366 self._t = loadtemplater(ui, spec, cache=templatekw.defaulttempl)
367 367 self._parts = templatepartsmap(spec, self._t,
368 368 ['docheader', 'docfooter', 'separator'])
369 369 self._counter = itertools.count()
370 370 self._cache = {} # for templatekw/funcs to store reusable data
371 371 self._renderitem('docheader', {})
372 372
373 373 def _showitem(self):
374 374 item = self._item.copy()
375 375 item['index'] = index = next(self._counter)
376 376 if index > 0:
377 377 self._renderitem('separator', {})
378 378 self._renderitem(self._tref, item)
379 379
380 380 def _renderitem(self, part, item):
381 381 if part not in self._parts:
382 382 return
383 383 ref = self._parts[part]
384 384
385 385 # TODO: add support for filectx. probably each template keyword or
386 386 # function will have to declare dependent resources. e.g.
387 387 # @templatekeyword(..., requires=('ctx',))
388 388 props = {}
389 389 if 'ctx' in item:
390 390 props.update(templatekw.keywords)
391 391 # explicitly-defined fields precede templatekw
392 392 props.update(item)
393 393 if 'ctx' in item:
394 394 # but template resources must be always available
395 props['templ'] = self._t
396 395 props['repo'] = props['ctx'].repo()
397 396 props['revcache'] = {}
398 397 props = pycompat.strkwargs(props)
399 398 g = self._t(ref, ui=self._ui, cache=self._cache, **props)
400 399 self._out.write(templater.stringify(g))
401 400
402 401 def end(self):
403 402 baseformatter.end(self)
404 403 self._renderitem('docfooter', {})
405 404
406 405 templatespec = collections.namedtuple(r'templatespec',
407 406 r'ref tmpl mapfile')
408 407
409 408 def lookuptemplate(ui, topic, tmpl):
410 409 """Find the template matching the given -T/--template spec 'tmpl'
411 410
412 411 'tmpl' can be any of the following:
413 412
414 413 - a literal template (e.g. '{rev}')
415 414 - a map-file name or path (e.g. 'changelog')
416 415 - a reference to [templates] in config file
417 416 - a path to raw template file
418 417
419 418 A map file defines a stand-alone template environment. If a map file
420 419 selected, all templates defined in the file will be loaded, and the
421 420 template matching the given topic will be rendered. Aliases won't be
422 421 loaded from user config, but from the map file.
423 422
424 423 If no map file selected, all templates in [templates] section will be
425 424 available as well as aliases in [templatealias].
426 425 """
427 426
428 427 # looks like a literal template?
429 428 if '{' in tmpl:
430 429 return templatespec('', tmpl, None)
431 430
432 431 # perhaps a stock style?
433 432 if not os.path.split(tmpl)[0]:
434 433 mapname = (templater.templatepath('map-cmdline.' + tmpl)
435 434 or templater.templatepath(tmpl))
436 435 if mapname and os.path.isfile(mapname):
437 436 return templatespec(topic, None, mapname)
438 437
439 438 # perhaps it's a reference to [templates]
440 439 if ui.config('templates', tmpl):
441 440 return templatespec(tmpl, None, None)
442 441
443 442 if tmpl == 'list':
444 443 ui.write(_("available styles: %s\n") % templater.stylelist())
445 444 raise error.Abort(_("specify a template"))
446 445
447 446 # perhaps it's a path to a map or a template
448 447 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
449 448 # is it a mapfile for a style?
450 449 if os.path.basename(tmpl).startswith("map-"):
451 450 return templatespec(topic, None, os.path.realpath(tmpl))
452 451 with util.posixfile(tmpl, 'rb') as f:
453 452 tmpl = f.read()
454 453 return templatespec('', tmpl, None)
455 454
456 455 # constant string?
457 456 return templatespec('', tmpl, None)
458 457
459 458 def templatepartsmap(spec, t, partnames):
460 459 """Create a mapping of {part: ref}"""
461 460 partsmap = {spec.ref: spec.ref} # initial ref must exist in t
462 461 if spec.mapfile:
463 462 partsmap.update((p, p) for p in partnames if p in t)
464 463 elif spec.ref:
465 464 for part in partnames:
466 465 ref = '%s:%s' % (spec.ref, part) # select config sub-section
467 466 if ref in t:
468 467 partsmap[part] = ref
469 468 return partsmap
470 469
471 def loadtemplater(ui, spec, cache=None):
470 def loadtemplater(ui, spec, resources=None, cache=None):
472 471 """Create a templater from either a literal template or loading from
473 472 a map file"""
474 473 assert not (spec.tmpl and spec.mapfile)
475 474 if spec.mapfile:
476 return templater.templater.frommapfile(spec.mapfile, cache=cache)
477 return maketemplater(ui, spec.tmpl, cache=cache)
475 frommapfile = templater.templater.frommapfile
476 return frommapfile(spec.mapfile, resources=resources, cache=cache)
477 return maketemplater(ui, spec.tmpl, resources=resources, cache=cache)
478 478
479 def maketemplater(ui, tmpl, cache=None):
479 def maketemplater(ui, tmpl, resources=None, cache=None):
480 480 """Create a templater from a string template 'tmpl'"""
481 481 aliases = ui.configitems('templatealias')
482 t = templater.templater(cache=cache, aliases=aliases)
482 t = templater.templater(resources=resources, cache=cache, aliases=aliases)
483 483 t.cache.update((k, templater.unquotestring(v))
484 484 for k, v in ui.configitems('templates'))
485 485 if tmpl:
486 486 t.cache[''] = tmpl
487 487 return t
488 488
489 489 def formatter(ui, out, topic, opts):
490 490 template = opts.get("template", "")
491 491 if template == "json":
492 492 return jsonformatter(ui, out, topic, opts)
493 493 elif template == "pickle":
494 494 return pickleformatter(ui, out, topic, opts)
495 495 elif template == "debug":
496 496 return debugformatter(ui, out, topic, opts)
497 497 elif template != "":
498 498 return templateformatter(ui, out, topic, opts)
499 499 # developer config: ui.formatdebug
500 500 elif ui.configbool('ui', 'formatdebug'):
501 501 return debugformatter(ui, out, topic, opts)
502 502 # deprecated config: ui.formatjson
503 503 elif ui.configbool('ui', 'formatjson'):
504 504 return jsonformatter(ui, out, topic, opts)
505 505 return plainformatter(ui, out, topic, opts)
506 506
507 507 @contextlib.contextmanager
508 508 def openformatter(ui, filename, topic, opts):
509 509 """Create a formatter that writes outputs to the specified file
510 510
511 511 Must be invoked using the 'with' statement.
512 512 """
513 513 with util.posixfile(filename, 'wb') as out:
514 514 with formatter(ui, out, topic, opts) as fm:
515 515 yield fm
516 516
517 517 @contextlib.contextmanager
518 518 def _neverending(fm):
519 519 yield fm
520 520
521 521 def maybereopen(fm, filename, opts):
522 522 """Create a formatter backed by file if filename specified, else return
523 523 the given formatter
524 524
525 525 Must be invoked using the 'with' statement. This will never call fm.end()
526 526 of the given formatter.
527 527 """
528 528 if filename:
529 529 return openformatter(fm._ui, filename, fm._topic, opts)
530 530 else:
531 531 return _neverending(fm)
@@ -1,1538 +1,1566 b''
1 1 # templater.py - template expansion for output
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import, print_function
9 9
10 10 import os
11 11 import re
12 12 import types
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 color,
17 17 config,
18 18 encoding,
19 19 error,
20 20 minirst,
21 21 obsutil,
22 22 parser,
23 23 pycompat,
24 24 registrar,
25 25 revset as revsetmod,
26 26 revsetlang,
27 27 scmutil,
28 28 templatefilters,
29 29 templatekw,
30 30 util,
31 31 )
32 32
33 33 # template parsing
34 34
35 35 elements = {
36 36 # token-type: binding-strength, primary, prefix, infix, suffix
37 37 "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
38 38 ".": (18, None, None, (".", 18), None),
39 39 "%": (15, None, None, ("%", 15), None),
40 40 "|": (15, None, None, ("|", 15), None),
41 41 "*": (5, None, None, ("*", 5), None),
42 42 "/": (5, None, None, ("/", 5), None),
43 43 "+": (4, None, None, ("+", 4), None),
44 44 "-": (4, None, ("negate", 19), ("-", 4), None),
45 45 "=": (3, None, None, ("keyvalue", 3), None),
46 46 ",": (2, None, None, ("list", 2), None),
47 47 ")": (0, None, None, None, None),
48 48 "integer": (0, "integer", None, None, None),
49 49 "symbol": (0, "symbol", None, None, None),
50 50 "string": (0, "string", None, None, None),
51 51 "template": (0, "template", None, None, None),
52 52 "end": (0, None, None, None, None),
53 53 }
54 54
55 55 def tokenize(program, start, end, term=None):
56 56 """Parse a template expression into a stream of tokens, which must end
57 57 with term if specified"""
58 58 pos = start
59 59 program = pycompat.bytestr(program)
60 60 while pos < end:
61 61 c = program[pos]
62 62 if c.isspace(): # skip inter-token whitespace
63 63 pass
64 64 elif c in "(=,).%|+-*/": # handle simple operators
65 65 yield (c, None, pos)
66 66 elif c in '"\'': # handle quoted templates
67 67 s = pos + 1
68 68 data, pos = _parsetemplate(program, s, end, c)
69 69 yield ('template', data, s)
70 70 pos -= 1
71 71 elif c == 'r' and program[pos:pos + 2] in ("r'", 'r"'):
72 72 # handle quoted strings
73 73 c = program[pos + 1]
74 74 s = pos = pos + 2
75 75 while pos < end: # find closing quote
76 76 d = program[pos]
77 77 if d == '\\': # skip over escaped characters
78 78 pos += 2
79 79 continue
80 80 if d == c:
81 81 yield ('string', program[s:pos], s)
82 82 break
83 83 pos += 1
84 84 else:
85 85 raise error.ParseError(_("unterminated string"), s)
86 86 elif c.isdigit():
87 87 s = pos
88 88 while pos < end:
89 89 d = program[pos]
90 90 if not d.isdigit():
91 91 break
92 92 pos += 1
93 93 yield ('integer', program[s:pos], s)
94 94 pos -= 1
95 95 elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"')
96 96 or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')):
97 97 # handle escaped quoted strings for compatibility with 2.9.2-3.4,
98 98 # where some of nested templates were preprocessed as strings and
99 99 # then compiled. therefore, \"...\" was allowed. (issue4733)
100 100 #
101 101 # processing flow of _evalifliteral() at 5ab28a2e9962:
102 102 # outer template string -> stringify() -> compiletemplate()
103 103 # ------------------------ ------------ ------------------
104 104 # {f("\\\\ {g(\"\\\"\")}"} \\ {g("\"")} [r'\\', {g("\"")}]
105 105 # ~~~~~~~~
106 106 # escaped quoted string
107 107 if c == 'r':
108 108 pos += 1
109 109 token = 'string'
110 110 else:
111 111 token = 'template'
112 112 quote = program[pos:pos + 2]
113 113 s = pos = pos + 2
114 114 while pos < end: # find closing escaped quote
115 115 if program.startswith('\\\\\\', pos, end):
116 116 pos += 4 # skip over double escaped characters
117 117 continue
118 118 if program.startswith(quote, pos, end):
119 119 # interpret as if it were a part of an outer string
120 120 data = parser.unescapestr(program[s:pos])
121 121 if token == 'template':
122 122 data = _parsetemplate(data, 0, len(data))[0]
123 123 yield (token, data, s)
124 124 pos += 1
125 125 break
126 126 pos += 1
127 127 else:
128 128 raise error.ParseError(_("unterminated string"), s)
129 129 elif c.isalnum() or c in '_':
130 130 s = pos
131 131 pos += 1
132 132 while pos < end: # find end of symbol
133 133 d = program[pos]
134 134 if not (d.isalnum() or d == "_"):
135 135 break
136 136 pos += 1
137 137 sym = program[s:pos]
138 138 yield ('symbol', sym, s)
139 139 pos -= 1
140 140 elif c == term:
141 141 yield ('end', None, pos + 1)
142 142 return
143 143 else:
144 144 raise error.ParseError(_("syntax error"), pos)
145 145 pos += 1
146 146 if term:
147 147 raise error.ParseError(_("unterminated template expansion"), start)
148 148 yield ('end', None, pos)
149 149
150 150 def _parsetemplate(tmpl, start, stop, quote=''):
151 151 r"""
152 152 >>> _parsetemplate(b'foo{bar}"baz', 0, 12)
153 153 ([('string', 'foo'), ('symbol', 'bar'), ('string', '"baz')], 12)
154 154 >>> _parsetemplate(b'foo{bar}"baz', 0, 12, quote=b'"')
155 155 ([('string', 'foo'), ('symbol', 'bar')], 9)
156 156 >>> _parsetemplate(b'foo"{bar}', 0, 9, quote=b'"')
157 157 ([('string', 'foo')], 4)
158 158 >>> _parsetemplate(br'foo\"bar"baz', 0, 12, quote=b'"')
159 159 ([('string', 'foo"'), ('string', 'bar')], 9)
160 160 >>> _parsetemplate(br'foo\\"bar', 0, 10, quote=b'"')
161 161 ([('string', 'foo\\')], 6)
162 162 """
163 163 parsed = []
164 164 sepchars = '{' + quote
165 165 pos = start
166 166 p = parser.parser(elements)
167 167 while pos < stop:
168 168 n = min((tmpl.find(c, pos, stop) for c in sepchars),
169 169 key=lambda n: (n < 0, n))
170 170 if n < 0:
171 171 parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
172 172 pos = stop
173 173 break
174 174 c = tmpl[n:n + 1]
175 175 bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
176 176 if bs % 2 == 1:
177 177 # escaped (e.g. '\{', '\\\{', but not '\\{')
178 178 parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
179 179 pos = n + 1
180 180 continue
181 181 if n > pos:
182 182 parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
183 183 if c == quote:
184 184 return parsed, n + 1
185 185
186 186 parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
187 187 parsed.append(parseres)
188 188
189 189 if quote:
190 190 raise error.ParseError(_("unterminated string"), start)
191 191 return parsed, pos
192 192
193 193 def _unnesttemplatelist(tree):
194 194 """Expand list of templates to node tuple
195 195
196 196 >>> def f(tree):
197 197 ... print(pycompat.sysstr(prettyformat(_unnesttemplatelist(tree))))
198 198 >>> f((b'template', []))
199 199 (string '')
200 200 >>> f((b'template', [(b'string', b'foo')]))
201 201 (string 'foo')
202 202 >>> f((b'template', [(b'string', b'foo'), (b'symbol', b'rev')]))
203 203 (template
204 204 (string 'foo')
205 205 (symbol 'rev'))
206 206 >>> f((b'template', [(b'symbol', b'rev')])) # template(rev) -> str
207 207 (template
208 208 (symbol 'rev'))
209 209 >>> f((b'template', [(b'template', [(b'string', b'foo')])]))
210 210 (string 'foo')
211 211 """
212 212 if not isinstance(tree, tuple):
213 213 return tree
214 214 op = tree[0]
215 215 if op != 'template':
216 216 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
217 217
218 218 assert len(tree) == 2
219 219 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
220 220 if not xs:
221 221 return ('string', '') # empty template ""
222 222 elif len(xs) == 1 and xs[0][0] == 'string':
223 223 return xs[0] # fast path for string with no template fragment "x"
224 224 else:
225 225 return (op,) + xs
226 226
227 227 def parse(tmpl):
228 228 """Parse template string into tree"""
229 229 parsed, pos = _parsetemplate(tmpl, 0, len(tmpl))
230 230 assert pos == len(tmpl), 'unquoted template should be consumed'
231 231 return _unnesttemplatelist(('template', parsed))
232 232
233 233 def _parseexpr(expr):
234 234 """Parse a template expression into tree
235 235
236 236 >>> _parseexpr(b'"foo"')
237 237 ('string', 'foo')
238 238 >>> _parseexpr(b'foo(bar)')
239 239 ('func', ('symbol', 'foo'), ('symbol', 'bar'))
240 240 >>> _parseexpr(b'foo(')
241 241 Traceback (most recent call last):
242 242 ...
243 243 ParseError: ('not a prefix: end', 4)
244 244 >>> _parseexpr(b'"foo" "bar"')
245 245 Traceback (most recent call last):
246 246 ...
247 247 ParseError: ('invalid token', 7)
248 248 """
249 249 p = parser.parser(elements)
250 250 tree, pos = p.parse(tokenize(expr, 0, len(expr)))
251 251 if pos != len(expr):
252 252 raise error.ParseError(_('invalid token'), pos)
253 253 return _unnesttemplatelist(tree)
254 254
255 255 def prettyformat(tree):
256 256 return parser.prettyformat(tree, ('integer', 'string', 'symbol'))
257 257
258 258 def compileexp(exp, context, curmethods):
259 259 """Compile parsed template tree to (func, data) pair"""
260 260 t = exp[0]
261 261 if t in curmethods:
262 262 return curmethods[t](exp, context)
263 263 raise error.ParseError(_("unknown method '%s'") % t)
264 264
265 265 # template evaluation
266 266
267 267 def getsymbol(exp):
268 268 if exp[0] == 'symbol':
269 269 return exp[1]
270 270 raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
271 271
272 272 def getlist(x):
273 273 if not x:
274 274 return []
275 275 if x[0] == 'list':
276 276 return getlist(x[1]) + [x[2]]
277 277 return [x]
278 278
279 279 def gettemplate(exp, context):
280 280 """Compile given template tree or load named template from map file;
281 281 returns (func, data) pair"""
282 282 if exp[0] in ('template', 'string'):
283 283 return compileexp(exp, context, methods)
284 284 if exp[0] == 'symbol':
285 285 # unlike runsymbol(), here 'symbol' is always taken as template name
286 286 # even if it exists in mapping. this allows us to override mapping
287 287 # by web templates, e.g. 'changelogtag' is redefined in map file.
288 288 return context._load(exp[1])
289 289 raise error.ParseError(_("expected template specifier"))
290 290
291 291 def findsymbolicname(arg):
292 292 """Find symbolic name for the given compiled expression; returns None
293 293 if nothing found reliably"""
294 294 while True:
295 295 func, data = arg
296 296 if func is runsymbol:
297 297 return data
298 298 elif func is runfilter:
299 299 arg = data[0]
300 300 else:
301 301 return None
302 302
303 303 def evalrawexp(context, mapping, arg):
304 304 """Evaluate given argument as a bare template object which may require
305 305 further processing (such as folding generator of strings)"""
306 306 func, data = arg
307 307 return func(context, mapping, data)
308 308
309 309 def evalfuncarg(context, mapping, arg):
310 310 """Evaluate given argument as value type"""
311 311 thing = evalrawexp(context, mapping, arg)
312 312 thing = templatekw.unwrapvalue(thing)
313 313 # evalrawexp() may return string, generator of strings or arbitrary object
314 314 # such as date tuple, but filter does not want generator.
315 315 if isinstance(thing, types.GeneratorType):
316 316 thing = stringify(thing)
317 317 return thing
318 318
319 319 def evalboolean(context, mapping, arg):
320 320 """Evaluate given argument as boolean, but also takes boolean literals"""
321 321 func, data = arg
322 322 if func is runsymbol:
323 323 thing = func(context, mapping, data, default=None)
324 324 if thing is None:
325 325 # not a template keyword, takes as a boolean literal
326 326 thing = util.parsebool(data)
327 327 else:
328 328 thing = func(context, mapping, data)
329 329 thing = templatekw.unwrapvalue(thing)
330 330 if isinstance(thing, bool):
331 331 return thing
332 332 # other objects are evaluated as strings, which means 0 is True, but
333 333 # empty dict/list should be False as they are expected to be ''
334 334 return bool(stringify(thing))
335 335
336 336 def evalinteger(context, mapping, arg, err=None):
337 337 v = evalfuncarg(context, mapping, arg)
338 338 try:
339 339 return int(v)
340 340 except (TypeError, ValueError):
341 341 raise error.ParseError(err or _('not an integer'))
342 342
343 343 def evalstring(context, mapping, arg):
344 344 return stringify(evalrawexp(context, mapping, arg))
345 345
346 346 def evalstringliteral(context, mapping, arg):
347 347 """Evaluate given argument as string template, but returns symbol name
348 348 if it is unknown"""
349 349 func, data = arg
350 350 if func is runsymbol:
351 351 thing = func(context, mapping, data, default=data)
352 352 else:
353 353 thing = func(context, mapping, data)
354 354 return stringify(thing)
355 355
356 356 _evalfuncbytype = {
357 357 bool: evalboolean,
358 358 bytes: evalstring,
359 359 int: evalinteger,
360 360 }
361 361
362 362 def evalastype(context, mapping, arg, typ):
363 363 """Evaluate given argument and coerce its type"""
364 364 try:
365 365 f = _evalfuncbytype[typ]
366 366 except KeyError:
367 367 raise error.ProgrammingError('invalid type specified: %r' % typ)
368 368 return f(context, mapping, arg)
369 369
370 370 def runinteger(context, mapping, data):
371 371 return int(data)
372 372
373 373 def runstring(context, mapping, data):
374 374 return data
375 375
376 376 def _recursivesymbolblocker(key):
377 377 def showrecursion(**args):
378 378 raise error.Abort(_("recursive reference '%s' in template") % key)
379 379 return showrecursion
380 380
381 381 def _runrecursivesymbol(context, mapping, key):
382 382 raise error.Abort(_("recursive reference '%s' in template") % key)
383 383
384 384 def runsymbol(context, mapping, key, default=''):
385 385 v = context.symbol(mapping, key)
386 386 if v is None:
387 387 # put poison to cut recursion. we can't move this to parsing phase
388 388 # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
389 389 safemapping = mapping.copy()
390 390 safemapping[key] = _recursivesymbolblocker(key)
391 391 try:
392 392 v = context.process(key, safemapping)
393 393 except TemplateNotFound:
394 394 v = default
395 395 if callable(v):
396 return v(**pycompat.strkwargs(mapping))
396 # TODO: templatekw functions will be updated to take (context, mapping)
397 # pair instead of **props
398 props = context._resources.copy()
399 props.update(mapping)
400 return v(**props)
397 401 return v
398 402
399 403 def buildtemplate(exp, context):
400 404 ctmpl = [compileexp(e, context, methods) for e in exp[1:]]
401 405 return (runtemplate, ctmpl)
402 406
403 407 def runtemplate(context, mapping, template):
404 408 for arg in template:
405 409 yield evalrawexp(context, mapping, arg)
406 410
407 411 def buildfilter(exp, context):
408 412 n = getsymbol(exp[2])
409 413 if n in context._filters:
410 414 filt = context._filters[n]
411 415 arg = compileexp(exp[1], context, methods)
412 416 return (runfilter, (arg, filt))
413 417 if n in funcs:
414 418 f = funcs[n]
415 419 args = _buildfuncargs(exp[1], context, methods, n, f._argspec)
416 420 return (f, args)
417 421 raise error.ParseError(_("unknown function '%s'") % n)
418 422
419 423 def runfilter(context, mapping, data):
420 424 arg, filt = data
421 425 thing = evalfuncarg(context, mapping, arg)
422 426 try:
423 427 return filt(thing)
424 428 except (ValueError, AttributeError, TypeError):
425 429 sym = findsymbolicname(arg)
426 430 if sym:
427 431 msg = (_("template filter '%s' is not compatible with keyword '%s'")
428 432 % (pycompat.sysbytes(filt.__name__), sym))
429 433 else:
430 434 msg = (_("incompatible use of template filter '%s'")
431 435 % pycompat.sysbytes(filt.__name__))
432 436 raise error.Abort(msg)
433 437
434 438 def buildmap(exp, context):
435 439 darg = compileexp(exp[1], context, methods)
436 440 targ = gettemplate(exp[2], context)
437 441 return (runmap, (darg, targ))
438 442
439 443 def runmap(context, mapping, data):
440 444 darg, targ = data
441 445 d = evalrawexp(context, mapping, darg)
442 446 if util.safehasattr(d, 'itermaps'):
443 447 diter = d.itermaps()
444 448 else:
445 449 try:
446 450 diter = iter(d)
447 451 except TypeError:
448 452 sym = findsymbolicname(darg)
449 453 if sym:
450 454 raise error.ParseError(_("keyword '%s' is not iterable") % sym)
451 455 else:
452 456 raise error.ParseError(_("%r is not iterable") % d)
453 457
454 458 for i, v in enumerate(diter):
455 459 lm = mapping.copy()
456 460 lm['index'] = i
457 461 if isinstance(v, dict):
458 462 lm.update(v)
459 463 lm['originalnode'] = mapping.get('node')
460 464 yield evalrawexp(context, lm, targ)
461 465 else:
462 466 # v is not an iterable of dicts, this happen when 'key'
463 467 # has been fully expanded already and format is useless.
464 468 # If so, return the expanded value.
465 469 yield v
466 470
467 471 def buildmember(exp, context):
468 472 darg = compileexp(exp[1], context, methods)
469 473 memb = getsymbol(exp[2])
470 474 return (runmember, (darg, memb))
471 475
472 476 def runmember(context, mapping, data):
473 477 darg, memb = data
474 478 d = evalrawexp(context, mapping, darg)
475 479 if util.safehasattr(d, 'tomap'):
476 480 lm = mapping.copy()
477 481 lm.update(d.tomap())
478 482 return runsymbol(context, lm, memb)
479 483 if util.safehasattr(d, 'get'):
480 484 return _getdictitem(d, memb)
481 485
482 486 sym = findsymbolicname(darg)
483 487 if sym:
484 488 raise error.ParseError(_("keyword '%s' has no member") % sym)
485 489 else:
486 490 raise error.ParseError(_("%r has no member") % d)
487 491
488 492 def buildnegate(exp, context):
489 493 arg = compileexp(exp[1], context, exprmethods)
490 494 return (runnegate, arg)
491 495
492 496 def runnegate(context, mapping, data):
493 497 data = evalinteger(context, mapping, data,
494 498 _('negation needs an integer argument'))
495 499 return -data
496 500
497 501 def buildarithmetic(exp, context, func):
498 502 left = compileexp(exp[1], context, exprmethods)
499 503 right = compileexp(exp[2], context, exprmethods)
500 504 return (runarithmetic, (func, left, right))
501 505
502 506 def runarithmetic(context, mapping, data):
503 507 func, left, right = data
504 508 left = evalinteger(context, mapping, left,
505 509 _('arithmetic only defined on integers'))
506 510 right = evalinteger(context, mapping, right,
507 511 _('arithmetic only defined on integers'))
508 512 try:
509 513 return func(left, right)
510 514 except ZeroDivisionError:
511 515 raise error.Abort(_('division by zero is not defined'))
512 516
513 517 def buildfunc(exp, context):
514 518 n = getsymbol(exp[1])
515 519 if n in funcs:
516 520 f = funcs[n]
517 521 args = _buildfuncargs(exp[2], context, exprmethods, n, f._argspec)
518 522 return (f, args)
519 523 if n in context._filters:
520 524 args = _buildfuncargs(exp[2], context, exprmethods, n, argspec=None)
521 525 if len(args) != 1:
522 526 raise error.ParseError(_("filter %s expects one argument") % n)
523 527 f = context._filters[n]
524 528 return (runfilter, (args[0], f))
525 529 raise error.ParseError(_("unknown function '%s'") % n)
526 530
527 531 def _buildfuncargs(exp, context, curmethods, funcname, argspec):
528 532 """Compile parsed tree of function arguments into list or dict of
529 533 (func, data) pairs
530 534
531 535 >>> context = engine(lambda t: (runsymbol, t))
532 536 >>> def fargs(expr, argspec):
533 537 ... x = _parseexpr(expr)
534 538 ... n = getsymbol(x[1])
535 539 ... return _buildfuncargs(x[2], context, exprmethods, n, argspec)
536 540 >>> list(fargs(b'a(l=1, k=2)', b'k l m').keys())
537 541 ['l', 'k']
538 542 >>> args = fargs(b'a(opts=1, k=2)', b'**opts')
539 543 >>> list(args.keys()), list(args[b'opts'].keys())
540 544 (['opts'], ['opts', 'k'])
541 545 """
542 546 def compiledict(xs):
543 547 return util.sortdict((k, compileexp(x, context, curmethods))
544 548 for k, x in xs.iteritems())
545 549 def compilelist(xs):
546 550 return [compileexp(x, context, curmethods) for x in xs]
547 551
548 552 if not argspec:
549 553 # filter or function with no argspec: return list of positional args
550 554 return compilelist(getlist(exp))
551 555
552 556 # function with argspec: return dict of named args
553 557 _poskeys, varkey, _keys, optkey = argspec = parser.splitargspec(argspec)
554 558 treeargs = parser.buildargsdict(getlist(exp), funcname, argspec,
555 559 keyvaluenode='keyvalue', keynode='symbol')
556 560 compargs = util.sortdict()
557 561 if varkey:
558 562 compargs[varkey] = compilelist(treeargs.pop(varkey))
559 563 if optkey:
560 564 compargs[optkey] = compiledict(treeargs.pop(optkey))
561 565 compargs.update(compiledict(treeargs))
562 566 return compargs
563 567
564 568 def buildkeyvaluepair(exp, content):
565 569 raise error.ParseError(_("can't use a key-value pair in this context"))
566 570
567 571 # dict of template built-in functions
568 572 funcs = {}
569 573
570 574 templatefunc = registrar.templatefunc(funcs)
571 575
572 576 @templatefunc('date(date[, fmt])')
573 577 def date(context, mapping, args):
574 578 """Format a date. See :hg:`help dates` for formatting
575 579 strings. The default is a Unix date format, including the timezone:
576 580 "Mon Sep 04 15:13:13 2006 0700"."""
577 581 if not (1 <= len(args) <= 2):
578 582 # i18n: "date" is a keyword
579 583 raise error.ParseError(_("date expects one or two arguments"))
580 584
581 585 date = evalfuncarg(context, mapping, args[0])
582 586 fmt = None
583 587 if len(args) == 2:
584 588 fmt = evalstring(context, mapping, args[1])
585 589 try:
586 590 if fmt is None:
587 591 return util.datestr(date)
588 592 else:
589 593 return util.datestr(date, fmt)
590 594 except (TypeError, ValueError):
591 595 # i18n: "date" is a keyword
592 596 raise error.ParseError(_("date expects a date information"))
593 597
@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
def dict_(context, mapping, args):
    """Construct a dict from key-value pairs. A key may be omitted if
    a value expression can provide an unambiguous name."""
    data = util.sortdict()

    # positional arguments: infer the key from the expression itself
    # (e.g. a bare symbol names itself); ambiguity or duplication is fatal
    for v in args['args']:
        k = findsymbolicname(v)
        if not k:
            raise error.ParseError(_('dict key cannot be inferred'))
        if k in data or k in args['kwargs']:
            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
        data[k] = evalfuncarg(context, mapping, v)

    # explicit key=value arguments
    data.update((k, evalfuncarg(context, mapping, v))
                for k, v in args['kwargs'].iteritems())
    return templatekw.hybriddict(data)
611 615
@templatefunc('diff([includepattern [, excludepattern]])')
def diff(context, mapping, args):
    """Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects zero, one, or two arguments"))

    def getpatterns(i):
        # evaluate the i-th argument into a one-element pattern list,
        # or an empty list when absent/blank
        if i >= len(args):
            return []
        pat = evalstring(context, mapping, args[i]).strip()
        return [pat] if pat else []

    ctx = context.resource(mapping, 'ctx')
    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
    return ''.join(chunks)
631 635
@templatefunc('extdata(source)', argspec='source')
def extdata(context, mapping, args):
    """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
    if 'source' not in args:
        # i18n: "extdata" is a keyword
        raise error.ParseError(_('extdata expects one argument'))

    source = evalstring(context, mapping, args['source'])
    # memoize the parsed source in the per-render 'cache' resource so each
    # source is read at most once per template run
    cache = context.resource(mapping, 'cache').setdefault('extdata', {})
    ctx = context.resource(mapping, 'ctx')
    if source in cache:
        data = cache[source]
    else:
        data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
    # missing revisions render as the empty string
    return data.get(ctx.rev(), '')
647 651
@templatefunc('files(pattern)')
def files(context, mapping, args):
    """All files of the current changeset matching the pattern. See
    :hg:`help patterns`."""
    if len(args) != 1:
        # i18n: "files" is a keyword
        raise error.ParseError(_("files expects one argument"))

    pattern = evalstring(context, mapping, args[0])
    ctx = context.resource(mapping, 'ctx')
    matched = list(ctx.matches(ctx.match([pattern])))
    # TODO: pass (context, mapping) pair to keyword function
    props = context._resources.copy()
    props.update(mapping)
    return templatekw.showlist("file", matched, props)
661 668
@templatefunc('fill(text[, width[, initialindent[, hangindent]]])')
def fill(context, mapping, args):
    """Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = evalstring(context, mapping, args[0])
    # defaults mirror the "fill" filter: 76 columns, no indentation
    width = 76
    initindent = ''
    hangindent = ''
    if 2 <= len(args) <= 4:
        width = evalinteger(context, mapping, args[1],
                            # i18n: "fill" is a keyword
                            _("fill expects an integer width"))
        try:
            # third/fourth arguments are optional; IndexError just means
            # they were not supplied
            initindent = evalstring(context, mapping, args[2])
            hangindent = evalstring(context, mapping, args[3])
        except IndexError:
            pass

    return templatefilters.fill(text, width, initindent, hangindent)
685 692
@templatefunc('formatnode(node)')
def formatnode(context, mapping, args):
    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
    if len(args) != 1:
        # i18n: "formatnode" is a keyword
        raise error.ParseError(_("formatnode expects one argument"))

    ui = context.resource(mapping, 'ui')
    node = evalstring(context, mapping, args[0])
    if ui.debugflag:
        # --debug keeps the full hash
        return node
    return templatefilters.short(node)
698 705
@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
              argspec='text width fillchar left')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if 'text' not in args or 'width' not in args:
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args['width'],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))

    text = evalstring(context, mapping, args['text'])

    left = False
    fillchar = ' '
    if 'fillchar' in args:
        fillchar = evalstring(context, mapping, args['fillchar'])
        # the fill character may carry color effects, but must render as
        # exactly one character
        if len(color.stripeffects(fillchar)) != 1:
            # i18n: "pad" is a keyword
            raise error.ParseError(_("pad() expects a single fill character"))
    if 'left' in args:
        left = evalboolean(context, mapping, args['left'])

    # measure display width with color escape sequences stripped
    fillwidth = width - encoding.colwidth(color.stripeffects(text))
    if fillwidth <= 0:
        return text
    if left:
        return fillchar * fillwidth + text
    else:
        return text + fillchar * fillwidth
731 738
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indentchars = evalstring(context, mapping, args[1])
    firstline = (evalstring(context, mapping, args[2]) if len(args) == 3
                 else indentchars)

    # templatefilters.indent() leaves the first line untouched, so the
    # first-line indent is prepended here
    return templatefilters.indent(firstline + text, indentchars)
752 759
@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = evalfuncarg(context, mapping, args[0])
    # duck-type check: anything with a get() method qualifies as a dict
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    key = evalfuncarg(context, mapping, args[1])
    return _getdictitem(dictarg, key)
769 776
def _getdictitem(dictarg, key):
    # a missing key yields None, which renders as the empty string
    val = dictarg.get(key)
    if val is None:
        return
    # re-wrap so hybrid values keep their formatting metadata
    return templatekw.wraphybridvalue(dictarg, key, val)
775 782
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    # yield the raw (lazily rendered) branch matching the condition
    if evalboolean(context, mapping, args[0]):
        yield evalrawexp(context, mapping, args[1])
    elif len(args) == 3:
        yield evalrawexp(context, mapping, args[2])
789 796
@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    haystack = evalfuncarg(context, mapping, args[1])
    try:
        # coerce the needle to the haystack's element type when known
        # (e.g. integer revisions), falling back to bytes
        needle = evalastype(context, mapping, args[0],
                            getattr(haystack, 'keytype', None) or bytes)
        found = (needle in haystack)
    except error.ParseError:
        # a needle that cannot be coerced simply does not match
        found = False

    if found:
        yield evalrawexp(context, mapping, args[2])
    elif len(args) == 4:
        yield evalrawexp(context, mapping, args[3])
810 817
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    # both operands are compared as strings
    lhs = evalstring(context, mapping, args[0])
    rhs = evalstring(context, mapping, args[1])
    if lhs == rhs:
        yield evalrawexp(context, mapping, args[2])
    elif len(args) == 4:
        yield evalrawexp(context, mapping, args[3])
825 832
@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    # TODO: perhaps this should be evalfuncarg(), but it can't because hgweb
    # abuses generator as a keyword that returns a list of dicts.
    joinset = templatekw.unwrapvalue(evalrawexp(context, mapping, args[0]))
    joinfmt = getattr(joinset, 'joinfmt', pycompat.identity)
    if len(args) > 1:
        joiner = evalstring(context, mapping, args[1])
    else:
        joiner = " "

    # emit the separator before every item except the first
    for idx, item in enumerate(joinset):
        if idx:
            yield joiner
        yield joinfmt(item)
849 856
@templatefunc('label(label, expr)')
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = context.resource(mapping, 'ui')
    thing = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    label = evalstringliteral(context, mapping, args[0])

    return ui.label(thing, label)
866 873
@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset.
    If no such tags exist, the "{tag}" template resolves to
    the string "null"."""
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    pattern = evalstring(context, mapping, args[0]) if args else None

    # TODO: pass (context, mapping) pair to keyword function
    props = context._resources.copy()
    props.update(mapping)
    return templatekw.showlatesttags(pattern, **pycompat.strkwargs(props))
882 892
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    try:
        date = util.parsedate(date)
    except AttributeError: # not str nor date tuple
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects a date information"))
    if len(args) >= 2:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        # NOTE(review): 'str' matches bytes only on Python 2; presumably
        # needs a pycompat alias on Python 3 — confirm
        if isinstance(tz, str):
            tzoffset, remainder = util.parsetimezone(tz)
            if remainder:
                # trailing junk after the timezone: reject the string form
                tzoffset = None
        if tzoffset is None:
            # fall back to interpreting the argument as an offset in seconds
            try:
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    else:
        # no tz argument: use the local timezone offset
        tzoffset = util.makedate()[1]
    return (date[0], tzoffset)
913 923
@templatefunc('max(iterable)')
def max_(context, mapping, args):
    """Return the max of an iterable"""
    # no **kwargs: template functions are called as func(context, mapping,
    # args); the stray keyword parameter was unused and inconsistent with
    # every other function here
    if len(args) != 1:
        # i18n: "max" is a keyword
        raise error.ParseError(_("max expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        x = max(iterable)
    except (TypeError, ValueError):
        # non-iterable argument or empty iterable; report uniformly
        # i18n: "max" is a keyword
        raise error.ParseError(_("max first argument should be an iterable"))
    # re-wrap so hybrid values keep their formatting metadata
    return templatekw.wraphybridvalue(iterable, x, x)
928 938
@templatefunc('min(iterable)')
def min_(context, mapping, args):
    """Return the min of an iterable"""
    # no **kwargs: template functions are called as func(context, mapping,
    # args); the stray keyword parameter was unused and inconsistent with
    # every other function here
    if len(args) != 1:
        # i18n: "min" is a keyword
        raise error.ParseError(_("min expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        x = min(iterable)
    except (TypeError, ValueError):
        # non-iterable argument or empty iterable; report uniformly
        # i18n: "min" is a keyword
        raise error.ParseError(_("min first argument should be an iterable"))
    # re-wrap so hybrid values keep their formatting metadata
    return templatekw.wraphybridvalue(iterable, x, x)
943 953
@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that a / b + a mod b == a"""
    if not len(args) == 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    func = lambda a, b: a % b
    # delegate operand evaluation and coercion to the arithmetic runner
    return runarithmetic(context, mapping, (func, args[0], args[1]))
953 963
@templatefunc('obsfateoperations(markers)')
def obsfateoperations(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateoperations" is a keyword
        raise error.ParseError(_("obsfateoperations expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersoperations(markers)
        return templatekw.hybridlist(data, name='operation')
    except (TypeError, KeyError):
        # malformed marker input surfaces as a parse error
        # i18n: "obsfateoperations" is a keyword
        errmsg = _("obsfateoperations first argument should be an iterable")
        raise error.ParseError(errmsg)
970 980
@templatefunc('obsfatedate(markers)')
def obsfatedate(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfatedate" is a keyword
        raise error.ParseError(_("obsfatedate expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersdates(markers)
        # dates are (timestamp, tzoffset) pairs, hence the two-part format
        return templatekw.hybridlist(data, name='date', fmt='%d %d')
    except (TypeError, KeyError):
        # malformed marker input surfaces as a parse error
        # i18n: "obsfatedate" is a keyword
        errmsg = _("obsfatedate first argument should be an iterable")
        raise error.ParseError(errmsg)
987 997
@templatefunc('obsfateusers(markers)')
def obsfateusers(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateusers" is a keyword
        raise error.ParseError(_("obsfateusers expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersusers(markers)
        return templatekw.hybridlist(data, name='user')
    except (TypeError, KeyError, ValueError):
        # fix user-facing typo: "obsmakers" -> "obsmarkers"
        # i18n: "obsfateusers" is a keyword
        msg = _("obsfateusers first argument should be an iterable of "
                "obsmarkers")
        raise error.ParseError(msg)
1005 1015
@templatefunc('obsfateverb(successors, markers)')
def obsfateverb(context, mapping, args):
    """Compute obsfate related information based on successors (EXPERIMENTAL)"""
    if len(args) != 2:
        # i18n: "obsfateverb" is a keyword
        raise error.ParseError(_("obsfateverb expects two arguments"))

    successors = evalfuncarg(context, mapping, args[0])
    markers = evalfuncarg(context, mapping, args[1])

    try:
        return obsutil.obsfateverb(successors, markers)
    except TypeError:
        # a non-countable successors argument surfaces as a parse error
        # i18n: "obsfateverb" is a keyword
        errmsg = _("obsfateverb first argument should be countable")
        raise error.ParseError(errmsg)
1022 1032
@templatefunc('relpath(path)')
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    repo = context.resource(mapping, 'ctx').repo()
    return repo.pathto(evalstring(context, mapping, args[0]))
1034 1044
@templatefunc('revset(query[, formatargs...])')
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    if not args:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    ctx = context.resource(mapping, 'ctx')
    repo = ctx.repo()

    def query(expr):
        # compile and run the revset against this repository
        m = revsetmod.match(repo.ui, expr, repo=repo)
        return m(repo)

    if len(args) > 1:
        # extra arguments are interpolated into the query; the result is
        # not cached since it depends on them
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = list(query(revsetlang.formatspec(raw, *formatargs)))
    else:
        # plain queries are memoized in the per-render cache resource
        cache = context.resource(mapping, 'cache')
        revsetcache = cache.setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = list(query(raw))
            revsetcache[raw] = revs

    # TODO: pass (context, mapping) pair to keyword function
    props = context._resources.copy()
    props.update(mapping)
    return templatekw.showrevslist("revision", revs,
                                   **pycompat.strkwargs(props))
1067 1080
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
    """Format reStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    text, style = [evalstring(context, mapping, a) for a in args]
    return minirst.format(text, style=style, keep=['verbose'])
1079 1092
@templatefunc('separate(sep, args)', argspec='sep *args')
def separate(context, mapping, args):
    """Add a separator between non-empty arguments."""
    if 'sep' not in args:
        # i18n: "separate" is a keyword
        raise error.ParseError(_("separate expects at least one argument"))

    sep = evalstring(context, mapping, args['sep'])
    needsep = False
    for arg in args['args']:
        argstr = evalstring(context, mapping, arg)
        # empty arguments are dropped entirely (no separator for them)
        if not argstr:
            continue
        if needsep:
            yield sep
        else:
            needsep = True
        yield argstr
1098 1111
@templatefunc('shortest(node, minlength=4)')
def shortest(context, mapping, args):
    """Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = evalstring(context, mapping, args[0])

    # minimum length of the abbreviated hash; defaults to 4 characters
    minlength = 4
    if len(args) > 1:
        minlength = evalinteger(context, mapping, args[1],
                                # i18n: "shortest" is a keyword
                                _("shortest() expects an integer minlength"))

    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = context.resource(mapping, 'ctx')._repo.unfiltered().changelog
    return cl.shortest(node, minlength)
1120 1133
@templatefunc('strip(text[, chars])')
def strip(context, mapping, args):
    """Strip characters from a string. By default,
    strips all leading and trailing whitespace."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = evalstring(context, mapping, args[0])
    if len(args) == 1:
        return text.strip()
    return text.strip(evalstring(context, mapping, args[1]))
1134 1147
@templatefunc('sub(pattern, replacement, expression)')
def sub(context, mapping, args):
    """Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat = evalstring(context, mapping, args[0])
    rpl = evalstring(context, mapping, args[1])
    src = evalstring(context, mapping, args[2])
    try:
        patre = re.compile(pat)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
    try:
        # substitution itself may fail, e.g. on a bad group reference in
        # the replacement string
        yield patre.sub(rpl, src)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
1156 1169
@templatefunc('startswith(pattern, text)')
def startswith(context, mapping, args):
    """Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    patn = evalstring(context, mapping, args[0])
    text = evalstring(context, mapping, args[1])
    # non-matching text yields the empty string, not the original text
    return text if text.startswith(patn) else ''
1170 1183
@templatefunc('word(number, text[, separator])')
def word(context, mapping, args):
    """Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    num = evalinteger(context, mapping, args[0],
                      # i18n: "word" is a keyword
                      _("word expects an integer index"))
    text = evalstring(context, mapping, args[1])
    if len(args) == 3:
        splitter = evalstring(context, mapping, args[2])
    else:
        # None means str.split()'s default: any run of whitespace
        splitter = None

    tokens = text.split(splitter)
    # out-of-range indexes (either sign) yield an empty string instead of
    # raising IndexError
    if num >= len(tokens) or num < -len(tokens):
        return ''
    else:
        return tokens[num]
1193 1206
# methods to interpret function arguments or inner expressions (e.g. {_(x)});
# maps parse-tree node type -> compiler returning a (runner, data) pair
exprmethods = {
    "integer": lambda e, c: (runinteger, e[1]),
    "string": lambda e, c: (runstring, e[1]),
    "symbol": lambda e, c: (runsymbol, e[1]),
    "template": buildtemplate,
    "group": lambda e, c: compileexp(e[1], c, exprmethods),
    ".": buildmember,
    "|": buildfilter,
    "%": buildmap,
    "func": buildfunc,
    "keyvalue": buildkeyvaluepair,
    "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
    "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
    "negate": buildnegate,
    "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
    # '/' is floor division
    "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
    }
1212 1225
# methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"});
# same table as exprmethods except that a bare integer is a variable name
methods = exprmethods.copy()
methods["integer"] = exprmethods["symbol"] # '{1}' as variable
1216 1229
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of template aliases"""
    _section = _('template alias')
    _parse = staticmethod(_parseexpr)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if tree is func(...) or ...|filter; otherwise
        None"""
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
        if tree[0] == '|' and tree[2][0] == 'symbol':
            # a filter application 'x|f' is treated as the call f(x)
            return tree[2][1], [tree[1]]
1230 1243
def expandaliases(tree, aliases):
    """Return a new tree in which the given aliases are expanded"""
    aliasmap = _aliasrules.buildmap(aliases)
    return _aliasrules.expand(aliasmap, tree)
1235 1248
# template engine

# convenience alias: collapse template output into a single string
stringify = templatefilters.stringify
1239 1252
def _flatten(thing):
    '''yield a single stream from a possibly nested set of iterators'''
    thing = templatekw.unwraphybrid(thing)
    if isinstance(thing, bytes):
        yield thing
    elif isinstance(thing, str):
        # We can only hit this on Python 3, and it's here to guard
        # against infinite recursion.
        raise error.ProgrammingError('Mercurial IO including templates is done'
                                     ' with bytes, not strings')
    elif thing is None:
        pass
    elif not util.safehasattr(thing, '__iter__'):
        yield pycompat.bytestr(thing)
    else:
        # same classification one level down; deeper nesting recurses
        # through _flatten() itself
        for i in thing:
            i = templatekw.unwraphybrid(i)
            if isinstance(i, bytes):
                yield i
            elif i is None:
                pass
            elif not util.safehasattr(i, '__iter__'):
                yield pycompat.bytestr(i)
            else:
                for j in _flatten(i):
                    yield j
1266 1279
def unquotestring(s):
    '''unwrap quotes if any; otherwise returns unmodified string'''
    quoted = len(s) >= 2 and s[0] in "'\"" and s[0] == s[-1]
    if quoted:
        return s[1:-1]
    return s
1272 1285
class engine(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    def __init__(self, loader, filters=None, defaults=None, resources=None,
                 aliases=()):
        # loader: callable mapping a template name to its source text
        self._loader = loader
        if filters is None:
            filters = {}
        self._filters = filters
        if defaults is None:
            defaults = {}
        if resources is None:
            resources = {}
        self._defaults = defaults
        # engine-level default resources; per-mapping entries override them
        self._resources = resources
        self._aliasmap = _aliasrules.buildmap(aliases)
        self._cache = {} # key: (func, data)

    def symbol(self, mapping, key):
        """Resolve symbol to value or function; None if nothing found"""
        v = mapping.get(key)
        if v is None:
            v = self._defaults.get(key)
        return v

    def resource(self, mapping, key):
        """Return internal data (e.g. cache) used for keyword/function
        evaluation"""
        # per-mapping resources take precedence over engine defaults;
        # an unknown resource is a hard error (KeyError)
        v = mapping.get(key)
        if v is None:
            v = self._resources.get(key)
        if v is None:
            raise KeyError
        return v

    def _load(self, t):
        '''load, parse, and cache a template'''
        if t not in self._cache:
            # put poison to cut recursion while compiling 't'
            self._cache[t] = (_runrecursivesymbol, t)
            try:
                x = parse(self._loader(t))
                if self._aliasmap:
                    x = _aliasrules.expand(self._aliasmap, x)
                self._cache[t] = compileexp(x, self, methods)
            except: # re-raises
                # drop the poison entry so a later retry can recompile
                del self._cache[t]
                raise
        return self._cache[t]

    def process(self, t, mapping):
        '''Perform expansion. t is name of map element to expand.
        mapping contains added elements for use during expansion. Is a
        generator.'''
        func, data = self._load(t)
        return _flatten(func(self, mapping, data))
1337 1359
engines = {'default': engine}  # engine name -> engine class
1339 1361
def stylelist():
    """Return a comma-separated list of available style names, i.e. the
    names found as ``map-cmdline.<name>`` in the first template path."""
    paths = templatepaths()
    if not paths:
        return _('no templates found, try `hg debuginstall` for more info')
    styles = []
    for entry in os.listdir(paths[0]):
        parts = entry.split(".")
        if parts[-1] in ('orig', 'rej'):
            # leftovers from patching are not styles
            continue
        if parts[0] == "map-cmdline":
            styles.append(parts[1])
    return ", ".join(sorted(styles))
1353 1375
def _readmapfile(mapfile):
    """Load template elements from the given map file"""
    if not os.path.exists(mapfile):
        raise error.Abort(_("style '%s' not found") % mapfile,
                          hint=_("available styles: %s") % stylelist())

    base = os.path.dirname(mapfile)
    conf = config.config(includepaths=templatepaths())
    conf.read(mapfile, remap={'': 'templates'})

    cache = {}
    tmap = {}
    aliases = []

    val = conf.get('templates', '__base__')
    if val and val[0] not in "'\"":
        # treat as a pointer to a base class for this style
        path = util.normpath(os.path.join(base, val))

        # fallback check in template paths
        if not os.path.exists(path):
            for p in templatepaths():
                p2 = util.normpath(os.path.join(p, val))
                if os.path.isfile(p2):
                    path = p2
                    break
                p3 = util.normpath(os.path.join(p2, "map"))
                if os.path.isfile(p3):
                    path = p3
                    break

        # recursively inherit the base style's definitions; entries parsed
        # below override the inherited ones
        cache, tmap, aliases = _readmapfile(path)

    for key, val in conf['templates'].items():
        if not val:
            raise error.ParseError(_('missing value'),
                                   conf.source('templates', key))
        if val[0] in "'\"":
            # quoted value: an inline template string
            if val[0] != val[-1]:
                raise error.ParseError(_('unmatched quotes'),
                                       conf.source('templates', key))
            cache[key] = unquotestring(val)
        elif key != '__base__':
            # unquoted value: "[engine:]filename" of a template file
            val = 'default', val
            if ':' in val[1]:
                val = val[1].split(':', 1)
            tmap[key] = val[0], os.path.join(base, val[1])
    aliases.extend(conf['templatealias'].items())
    return cache, tmap, aliases
1403 1425
class TemplateNotFound(error.Abort):
    """Raised when a template name is not present in the template map."""
1406 1428
class templater(object):

    def __init__(self, filters=None, defaults=None, resources=None,
                 cache=None, aliases=(), minchunk=1024, maxchunk=65536):
        '''set up template engine.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.
        resources is dict of internal data (e.g. cache), which are inaccessible
        from user template.
        aliases is list of alias (name, replacement) pairs.
        '''
        if filters is None:
            filters = {}
        if defaults is None:
            defaults = {}
        if resources is None:
            resources = {}
        if cache is None:
            cache = {}
        self.cache = cache.copy()
        self.map = {}
        # user-supplied filters extend (and may override) the built-in set
        self.filters = templatefilters.filters.copy()
        self.filters.update(filters)
        self.defaults = defaults
        # the templater itself is always reachable by engines as 'templ'
        self._resources = {'templ': self}
        self._resources.update(resources)
        self._aliases = aliases
        self.minchunk, self.maxchunk = minchunk, maxchunk
        # per-engine-type instance cache, keyed by engine type name
        self.ecache = {}

    @classmethod
    def frommapfile(cls, mapfile, filters=None, defaults=None, resources=None,
                    cache=None, minchunk=1024, maxchunk=65536):
        """Create templater from the specified map file"""
        t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk)
        cache, tmap, aliases = _readmapfile(mapfile)
        t.cache.update(cache)
        t.map = tmap
        t._aliases = aliases
        return t

    def __contains__(self, key):
        # known either as an inline template or as a mapped template file
        return key in self.cache or key in self.map

    def load(self, t):
        '''Get the template for the given template name. Use a local cache.'''
        if t not in self.cache:
            try:
                self.cache[t] = util.readfile(self.map[t][1])
            except KeyError as inst:
                raise TemplateNotFound(_('"%s" not in template map') %
                                       inst.args[0])
            except IOError as inst:
                # re-raise with the template file name for context
                raise IOError(inst.args[0], _('template file %s: %s') %
                              (self.map[t][1], inst.args[1]))
        return self.cache[t]

    def render(self, mapping):
        """Render the default unnamed template and return result as string"""
        mapping = pycompat.strkwargs(mapping)
        return stringify(self('', **mapping))

    def __call__(self, t, **mapping):
        """Expand template t with the given mapping; yields byte chunks."""
        mapping = pycompat.byteskwargs(mapping)
        ttype = t in self.map and self.map[t][0] or 'default'
        if ttype not in self.ecache:
            try:
                ecls = engines[ttype]
            except KeyError:
                raise error.Abort(_('invalid template engine: %s') % ttype)
            # each engine gets the default resources of this templater
            self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
                                      self._resources, self._aliases)
        proc = self.ecache[ttype]

        stream = proc.process(t, mapping)
        if self.minchunk:
            # coalesce tiny chunks for output efficiency
            stream = util.increasingchunks(stream, min=self.minchunk,
                                           max=self.maxchunk)
        return stream
1480 1508
def templatepaths():
    '''return locations used for template files.

    Only directories that actually exist are returned, so the result may
    be empty in stripped-down installations.
    '''
    pathsrel = ['templates']
    paths = [os.path.normpath(os.path.join(util.datapath, f))
             for f in pathsrel]
    return [p for p in paths if os.path.isdir(p)]
1487 1515
def templatepath(name):
    '''return location of template file. returns None if not found.'''
    # probe each known template directory in order; first hit wins
    for p in templatepaths():
        f = os.path.join(p, name)
        if os.path.exists(f):
            return f
    return None
1495 1523
def stylemap(styles, paths=None):
    """Return path to mapfile for a given style.

    Searches mapfile in the following locations:
    1. templatepath/style/map
    2. templatepath/map-style
    3. templatepath/map

    Returns a (style, mapfile) pair for the first style that resolves;
    raises RuntimeError when none of the requested styles can be found.
    """

    if paths is None:
        paths = templatepaths()
    elif isinstance(paths, str):
        paths = [paths]

    if isinstance(styles, str):
        styles = [styles]

    for style in styles:
        # only plain name is allowed to honor template paths; reject
        # empty names, '.'/'..' and anything containing a path separator
        if (not style
            or style in (os.curdir, os.pardir)
            or pycompat.ossep in style
            or pycompat.osaltsep and pycompat.osaltsep in style):
            continue
        locations = [os.path.join(style, 'map'), 'map-' + style]
        locations.append('map')

        for path in paths:
            for location in locations:
                mapfile = os.path.join(path, location)
                if os.path.isfile(mapfile):
                    return style, mapfile

    raise RuntimeError("No hgweb templates found in %r" % paths)
1530 1558
def loadfunction(ui, extname, registrarobj):
    """Load template function from specified registrarobj

    Registers every function declared on registrarobj into the
    module-level 'funcs' table so templates can call it.
    """
    for name, func in registrarobj._table.iteritems():
        funcs[name] = func

# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()
@@ -1,54 +1,57 b''
1 1
2 2 $ cat > engine.py << EOF
3 3 >
4 4 > from mercurial import templater
5 5 >
6 6 > class mytemplater(object):
7 > def __init__(self, loader, filters, defaults, aliases):
7 > def __init__(self, loader, filters, defaults, resources, aliases):
8 8 > self.loader = loader
9 > self._resources = resources
9 10 >
10 11 > def process(self, t, map):
11 12 > tmpl = self.loader(t)
12 13 > for k, v in map.iteritems():
13 14 > if k in ('templ', 'ctx', 'repo', 'revcache', 'cache', 'troubles'):
14 15 > continue
15 16 > if hasattr(v, '__call__'):
16 > v = v(**map)
17 > props = self._resources.copy()
18 > props.update(map)
19 > v = v(**props)
17 20 > v = templater.stringify(v)
18 21 > tmpl = tmpl.replace('{{%s}}' % k, v)
19 22 > yield tmpl
20 23 >
21 24 > templater.engines['my'] = mytemplater
22 25 > EOF
23 26 $ hg init test
24 27 $ echo '[extensions]' > test/.hg/hgrc
25 28 $ echo "engine = `pwd`/engine.py" >> test/.hg/hgrc
26 29 $ cd test
27 30 $ cat > mymap << EOF
28 31 > changeset = my:changeset.txt
29 32 > EOF
30 33 $ cat > changeset.txt << EOF
31 34 > {{rev}} {{node}} {{author}}
32 35 > EOF
33 36 $ hg ci -Ama
34 37 adding changeset.txt
35 38 adding mymap
36 39 $ hg log --style=./mymap
37 40 0 97e5f848f0936960273bbf75be6388cd0350a32b test
38 41
39 42 $ cat > changeset.txt << EOF
40 43 > {{p1rev}} {{p1node}} {{p2rev}} {{p2node}}
41 44 > EOF
42 45 $ hg ci -Ama
43 46 $ hg log --style=./mymap
44 47 0 97e5f848f0936960273bbf75be6388cd0350a32b -1 0000000000000000000000000000000000000000
45 48 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000
46 49
47 50 invalid engine type:
48 51
49 52 $ echo 'changeset = unknown:changeset.txt' > unknownenginemap
50 53 $ hg log --style=./unknownenginemap
51 54 abort: invalid template engine: unknown
52 55 [255]
53 56
54 57 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now