localrepo: move repo creation logic out of localrepository.__init__ (API)...
Gregory Szorc
r39584:7ce9dea3 default
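The change is easiest to see at a call site: creating a repository is no
longer a constructor flag but the job of the module-level factory. A minimal
before/after sketch (the path and ui setup are illustrative)::

    from mercurial import localrepo, ui as uimod

    baseui = uimod.ui.load()

    # before: creation was a boolean argument on the constructor
    # repo = localrepo.localrepository(baseui, b'/tmp/demo', True)

    # after: localrepo.instance() performs the creation, and __init__
    # only opens repositories that already exist on disk
    repo = localrepo.instance(baseui, b'/tmp/demo', create=True)
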
diff --git a/hgext/keyword.py b/hgext/keyword.py
@@ -1,815 +1,815 @@
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a Distributed SCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56
57 57 The more specific your filename patterns are, the less speed
58 58 you lose in huge repositories.
59 59
60 60 For [keywordmaps] template mapping and expansion demonstration and
61 61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 62 available templates and filters.
63 63
64 64 Three additional date template filters are provided:
65 65
66 66 :``utcdate``: "2006/09/18 15:13:13"
67 67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
69 69
70 70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 71 replaced with customized keywords and templates. Again, run
72 72 :hg:`kwdemo` to control the results of your configuration changes.
73 73
74 74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 75 to avoid storing expanded keywords in the change history.
76 76
77 77 To force expansion after enabling it, or a configuration change, run
78 78 :hg:`kwexpand`.
79 79
80 80 Expansions spanning more than one line and incremental expansions,
81 81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 82 {desc}" expands to the first line of the changeset description.
83 83 '''
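
As a concrete illustration of the round trip described above, here is a
sketch of the expand/shrink cycle using the same regex shapes as ``rekw`` and
``rekwexp`` below (the keyword map is a made-up example)::

    import re

    templates = {b'Id': b'demo.txt,v 7ce9dea3 2006/09/18 user'}

    rekw = re.compile(br'\$(Id)\$')                  # unexpanded keyword
    rekwexp = re.compile(br'\$(Id): [^$\n\r]*? \$')  # expanded keyword

    text = b'# $Id$\n'
    expanded = rekw.sub(
        lambda m: b'$%s: %s $' % (m.group(1), templates[m.group(1)]), text)
    assert expanded == b'# $Id: demo.txt,v 7ce9dea3 2006/09/18 user $\n'

    # shrinking restores the bare keyword, exactly as _shrinktext() does
    assert rekwexp.sub(br'$\1$', expanded) == text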
84 84
85 85
86 86 from __future__ import absolute_import
87 87
88 88 import os
89 89 import re
90 90 import weakref
91 91
92 92 from mercurial.i18n import _
93 93 from mercurial.hgweb import webcommands
94 94
95 95 from mercurial import (
96 96 cmdutil,
97 97 context,
98 98 dispatch,
99 99 error,
100 100 extensions,
101 101 filelog,
102 102 localrepo,
103 103 logcmdutil,
104 104 match,
105 105 patch,
106 106 pathutil,
107 107 pycompat,
108 108 registrar,
109 109 scmutil,
110 110 templatefilters,
111 111 templateutil,
112 112 util,
113 113 )
114 114 from mercurial.utils import (
115 115 dateutil,
116 116 stringutil,
117 117 )
118 118
119 119 cmdtable = {}
120 120 command = registrar.command(cmdtable)
121 121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 123 # be specifying the version(s) of Mercurial they are tested with, or
124 124 # leave the attribute unspecified.
125 125 testedwith = 'ships-with-hg-core'
126 126
127 127 # hg commands that do not act on keywords
128 128 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
129 129 ' outgoing push tip verify convert email glog')
130 130
131 131 # webcommands that do not act on keywords
132 132 nokwwebcommands = ('annotate changeset rev filediff diff comparison')
133 133
134 134 # hg commands that trigger expansion only when writing to working dir,
135 135 # not when reading filelog, and unexpand when reading from working dir
136 136 restricted = ('merge kwexpand kwshrink record qrecord resolve transplant'
137 137 ' unshelve rebase graft backout histedit fetch')
138 138
139 139 # names of extensions using dorecord
140 140 recordextensions = 'record'
141 141
142 142 colortable = {
143 143 'kwfiles.enabled': 'green bold',
144 144 'kwfiles.deleted': 'cyan bold underline',
145 145 'kwfiles.enabledunknown': 'green',
146 146 'kwfiles.ignored': 'bold',
147 147 'kwfiles.ignoredunknown': 'none'
148 148 }
149 149
150 150 templatefilter = registrar.templatefilter()
151 151
152 152 configtable = {}
153 153 configitem = registrar.configitem(configtable)
154 154
155 155 configitem('keywordset', 'svn',
156 156 default=False,
157 157 )
158 158 # date like in cvs' $Date
159 159 @templatefilter('utcdate', intype=templateutil.date)
160 160 def utcdate(date):
161 161 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
162 162 '''
163 163 dateformat = '%Y/%m/%d %H:%M:%S'
164 164 return dateutil.datestr((date[0], 0), dateformat)
165 165 # date like in svn's $Date
166 166 @templatefilter('svnisodate', intype=templateutil.date)
167 167 def svnisodate(date):
168 168 '''Date. Returns a date in this format: "2009-08-18 13:00:13
169 169 +0200 (Tue, 18 Aug 2009)".
170 170 '''
171 171 return dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
172 172 # date like in svn's $Id
173 173 @templatefilter('svnutcdate', intype=templateutil.date)
174 174 def svnutcdate(date):
175 175 '''Date. Returns a UTC-date in this format: "2009-08-18
176 176 11:00:13Z".
177 177 '''
178 178 dateformat = '%Y-%m-%d %H:%M:%SZ'
179 179 return dateutil.datestr((date[0], 0), dateformat)
180 180
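All three filters take Mercurial's date tuple of ``(unixtime, offset)``, with
the offset in seconds west of UTC; the two UTC variants zero the offset before
formatting. A rough sketch against ``dateutil`` directly (the sample date is
arbitrary, and the byte-string formats assume Mercurial's internal bytes
convention)::

    from mercurial.utils import dateutil

    date = (1158592393, -7200)  # seconds west of UTC, i.e. a +0200 zone

    # utcdate-style: drop the offset and format in UTC
    print(dateutil.datestr((date[0], 0), b'%Y/%m/%d %H:%M:%S'))

    # svnisodate-style: %1%2 expand to the +HHMM timezone
    print(dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)'))
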
181 181 # make keyword tools accessible
182 182 kwtools = {'hgcmd': ''}
183 183
184 184 def _defaultkwmaps(ui):
185 185 '''Returns default keywordmaps according to keywordset configuration.'''
186 186 templates = {
187 187 'Revision': '{node|short}',
188 188 'Author': '{author|user}',
189 189 }
190 190 kwsets = ({
191 191 'Date': '{date|utcdate}',
192 192 'RCSfile': '{file|basename},v',
193 193 'RCSFile': '{file|basename},v', # kept for backwards compatibility
194 194 # with hg-keyword
195 195 'Source': '{root}/{file},v',
196 196 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
197 197 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
198 198 }, {
199 199 'Date': '{date|svnisodate}',
200 200 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
201 201 'LastChangedRevision': '{node|short}',
202 202 'LastChangedBy': '{author|user}',
203 203 'LastChangedDate': '{date|svnisodate}',
204 204 })
205 205 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
206 206 return templates
207 207
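The cvs/svn selection above works because ``configbool`` returns a bool and
``bool`` is an ``int`` subclass, so it can index the two-element ``kwsets``
tuple directly::

    kwsets = ({'flavour': 'cvs'}, {'flavour': 'svn'})
    assert kwsets[False]['flavour'] == 'cvs'  # False == 0
    assert kwsets[True]['flavour'] == 'svn'   # True == 1
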
208 208 def _shrinktext(text, subfunc):
209 209 '''Helper for keyword expansion removal in text.
210 210 Depending on subfunc also returns number of substitutions.'''
211 211 return subfunc(br'$\1$', text)
212 212
213 213 def _preselect(wstatus, changed):
214 214 '''Retrieves modified and added files from a working directory state
215 215 and returns the subset of each contained in given changed files
216 216 retrieved from a change context.'''
217 217 modified = [f for f in wstatus.modified if f in changed]
218 218 added = [f for f in wstatus.added if f in changed]
219 219 return modified, added
220 220
221 221
222 222 class kwtemplater(object):
223 223 '''
224 224 Sets up keyword templates, corresponding keyword regex, and
225 225 provides keyword substitution functions.
226 226 '''
227 227
228 228 def __init__(self, ui, repo, inc, exc):
229 229 self.ui = ui
230 230 self._repo = weakref.ref(repo)
231 231 self.match = match.match(repo.root, '', [], inc, exc)
232 232 self.restrict = kwtools['hgcmd'] in restricted.split()
233 233 self.postcommit = False
234 234
235 235 kwmaps = self.ui.configitems('keywordmaps')
236 236 if kwmaps: # override default templates
237 237 self.templates = dict(kwmaps)
238 238 else:
239 239 self.templates = _defaultkwmaps(self.ui)
240 240
241 241 @property
242 242 def repo(self):
243 243 return self._repo()
244 244
245 245 @util.propertycache
246 246 def escape(self):
247 247 '''Returns bar-separated and escaped keywords.'''
248 248 return '|'.join(map(stringutil.reescape, self.templates.keys()))
249 249
250 250 @util.propertycache
251 251 def rekw(self):
252 252 '''Returns regex for unexpanded keywords.'''
253 253 return re.compile(br'\$(%s)\$' % self.escape)
254 254
255 255 @util.propertycache
256 256 def rekwexp(self):
257 257 '''Returns regex for expanded keywords.'''
258 258 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
259 259
260 260 def substitute(self, data, path, ctx, subfunc):
261 261 '''Replaces keywords in data with expanded template.'''
262 262 def kwsub(mobj):
263 263 kw = mobj.group(1)
264 264 ct = logcmdutil.maketemplater(self.ui, self.repo,
265 265 self.templates[kw])
266 266 self.ui.pushbuffer()
267 267 ct.show(ctx, root=self.repo.root, file=path)
268 268 ekw = templatefilters.firstline(self.ui.popbuffer())
269 269 return '$%s: %s $' % (kw, ekw)
270 270 return subfunc(kwsub, data)
271 271
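``substitute()`` is handed either ``pattern.sub`` or ``pattern.subn`` as
``subfunc``; the latter additionally reports the substitution count, which
``overwrite()`` below uses as its "found" flag. The contract in miniature::

    import re

    pat = re.compile(br'\$(Rev)\$')

    def repl(m):
        return b'$Rev: 42 $'

    assert pat.sub(repl, b'$Rev$') == b'$Rev: 42 $'
    assert pat.subn(repl, b'$Rev$') == (b'$Rev: 42 $', 1)  # (result, count)
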
272 272 def linkctx(self, path, fileid):
273 273 '''Similar to filelog.linkrev, but returns a changectx.'''
274 274 return self.repo.filectx(path, fileid=fileid).changectx()
275 275
276 276 def expand(self, path, node, data):
277 277 '''Returns data with keywords expanded.'''
278 278 if (not self.restrict and self.match(path)
279 279 and not stringutil.binary(data)):
280 280 ctx = self.linkctx(path, node)
281 281 return self.substitute(data, path, ctx, self.rekw.sub)
282 282 return data
283 283
284 284 def iskwfile(self, cand, ctx):
285 285 '''Returns subset of candidates which are configured for keyword
286 286 expansion but are not symbolic links.'''
287 287 return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
288 288
289 289 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
290 290 '''Overwrites selected files expanding/shrinking keywords.'''
291 291 if self.restrict or lookup or self.postcommit: # exclude kw_copy
292 292 candidates = self.iskwfile(candidates, ctx)
293 293 if not candidates:
294 294 return
295 295 kwcmd = self.restrict and lookup # kwexpand/kwshrink
296 296 if self.restrict or expand and lookup:
297 297 mf = ctx.manifest()
298 298 if self.restrict or rekw:
299 299 re_kw = self.rekw
300 300 else:
301 301 re_kw = self.rekwexp
302 302 if expand:
303 303 msg = _('overwriting %s expanding keywords\n')
304 304 else:
305 305 msg = _('overwriting %s shrinking keywords\n')
306 306 for f in candidates:
307 307 if self.restrict:
308 308 data = self.repo.file(f).read(mf[f])
309 309 else:
310 310 data = self.repo.wread(f)
311 311 if stringutil.binary(data):
312 312 continue
313 313 if expand:
314 314 parents = ctx.parents()
315 315 if lookup:
316 316 ctx = self.linkctx(f, mf[f])
317 317 elif self.restrict and len(parents) > 1:
318 318 # merge commit
319 319 # in case of conflict f is in modified state during
320 320 # merge, even if f does not differ from f in parent
321 321 for p in parents:
322 322 if f in p and not p[f].cmp(ctx[f]):
323 323 ctx = p[f].changectx()
324 324 break
325 325 data, found = self.substitute(data, f, ctx, re_kw.subn)
326 326 elif self.restrict:
327 327 found = re_kw.search(data)
328 328 else:
329 329 data, found = _shrinktext(data, re_kw.subn)
330 330 if found:
331 331 self.ui.note(msg % f)
332 332 fp = self.repo.wvfs(f, "wb", atomictemp=True)
333 333 fp.write(data)
334 334 fp.close()
335 335 if kwcmd:
336 336 self.repo.dirstate.normal(f)
337 337 elif self.postcommit:
338 338 self.repo.dirstate.normallookup(f)
339 339
340 340 def shrink(self, fname, text):
341 341 '''Returns text with all keyword substitutions removed.'''
342 342 if self.match(fname) and not stringutil.binary(text):
343 343 return _shrinktext(text, self.rekwexp.sub)
344 344 return text
345 345
346 346 def shrinklines(self, fname, lines):
347 347 '''Returns lines with keyword substitutions removed.'''
348 348 if self.match(fname):
349 349 text = ''.join(lines)
350 350 if not stringutil.binary(text):
351 351 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
352 352 return lines
353 353
354 354 def wread(self, fname, data):
355 355 '''If in restricted mode returns data read from wdir with
356 356 keyword substitutions removed.'''
357 357 if self.restrict:
358 358 return self.shrink(fname, data)
359 359 return data
360 360
361 361 class kwfilelog(filelog.filelog):
362 362 '''
363 363 Subclass of filelog to hook into its read, add, cmp methods.
364 364 Keywords are "stored" unexpanded, and processed on reading.
365 365 '''
366 366 def __init__(self, opener, kwt, path):
367 367 super(kwfilelog, self).__init__(opener, path)
368 368 self.kwt = kwt
369 369 self.path = path
370 370
371 371 def read(self, node):
372 372 '''Expands keywords when reading filelog.'''
373 373 data = super(kwfilelog, self).read(node)
374 374 if self.renamed(node):
375 375 return data
376 376 return self.kwt.expand(self.path, node, data)
377 377
378 378 def add(self, text, meta, tr, link, p1=None, p2=None):
379 379 '''Removes keyword substitutions when adding to filelog.'''
380 380 text = self.kwt.shrink(self.path, text)
381 381 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
382 382
383 383 def cmp(self, node, text):
384 384 '''Removes keyword substitutions for comparison.'''
385 385 text = self.kwt.shrink(self.path, text)
386 386 return super(kwfilelog, self).cmp(node, text)
387 387
388 388 def _status(ui, repo, wctx, kwt, *pats, **opts):
389 389 '''Bails out if [keyword] configuration is not active.
390 390 Returns status of working directory.'''
391 391 if kwt:
392 392 opts = pycompat.byteskwargs(opts)
393 393 return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
394 394 unknown=opts.get('unknown') or opts.get('all'))
395 395 if ui.configitems('keyword'):
396 396 raise error.Abort(_('[keyword] patterns cannot match'))
397 397 raise error.Abort(_('no [keyword] patterns configured'))
398 398
399 399 def _kwfwrite(ui, repo, expand, *pats, **opts):
400 400 '''Selects files and passes them to kwtemplater.overwrite.'''
401 401 wctx = repo[None]
402 402 if len(wctx.parents()) > 1:
403 403 raise error.Abort(_('outstanding uncommitted merge'))
404 404 kwt = getattr(repo, '_keywordkwt', None)
405 405 with repo.wlock():
406 406 status = _status(ui, repo, wctx, kwt, *pats, **opts)
407 407 if status.modified or status.added or status.removed or status.deleted:
408 408 raise error.Abort(_('outstanding uncommitted changes'))
409 409 kwt.overwrite(wctx, status.clean, True, expand)
410 410
411 411 @command('kwdemo',
412 412 [('d', 'default', None, _('show default keyword template maps')),
413 413 ('f', 'rcfile', '',
414 414 _('read maps from rcfile'), _('FILE'))],
415 415 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
416 416 optionalrepo=True)
417 417 def demo(ui, repo, *args, **opts):
418 418 '''print [keywordmaps] configuration and an expansion example
419 419
420 420 Show current, custom, or default keyword template maps and their
421 421 expansions.
422 422
423 423 Extend the current configuration by specifying maps as arguments
424 424 and using -f/--rcfile to source an external hgrc file.
425 425
426 426 Use -d/--default to disable current configuration.
427 427
428 428 See :hg:`help templates` for information on templates and filters.
429 429 '''
430 430 def demoitems(section, items):
431 431 ui.write('[%s]\n' % section)
432 432 for k, v in sorted(items):
433 433 ui.write('%s = %s\n' % (k, v))
434 434
435 435 fn = 'demo.txt'
436 436 tmpdir = pycompat.mkdtemp('', 'kwdemo.')
437 437 ui.note(_('creating temporary repository at %s\n') % tmpdir)
438 438 if repo is None:
439 439 baseui = ui
440 440 else:
441 441 baseui = repo.baseui
442 repo = localrepo.localrepository(baseui, tmpdir, True)
442 repo = localrepo.instance(baseui, tmpdir, create=True)
443 443 ui.setconfig('keyword', fn, '', 'keyword')
444 444 svn = ui.configbool('keywordset', 'svn')
445 445 # explicitly set keywordset for demo output
446 446 ui.setconfig('keywordset', 'svn', svn, 'keyword')
447 447
448 448 uikwmaps = ui.configitems('keywordmaps')
449 449 if args or opts.get(r'rcfile'):
450 450 ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
451 451 if uikwmaps:
452 452 ui.status(_('\textending current template maps\n'))
453 453 if opts.get(r'default') or not uikwmaps:
454 454 if svn:
455 455 ui.status(_('\toverriding default svn keywordset\n'))
456 456 else:
457 457 ui.status(_('\toverriding default cvs keywordset\n'))
458 458 if opts.get(r'rcfile'):
459 459 ui.readconfig(opts.get('rcfile'))
460 460 if args:
461 461 # simulate hgrc parsing
462 462 rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args)
463 463 repo.vfs.write('hgrc', rcmaps)
464 464 ui.readconfig(repo.vfs.join('hgrc'))
465 465 kwmaps = dict(ui.configitems('keywordmaps'))
466 466 elif opts.get(r'default'):
467 467 if svn:
468 468 ui.status(_('\n\tconfiguration using default svn keywordset\n'))
469 469 else:
470 470 ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
471 471 kwmaps = _defaultkwmaps(ui)
472 472 if uikwmaps:
473 473 ui.status(_('\tdisabling current template maps\n'))
474 474 for k, v in kwmaps.iteritems():
475 475 ui.setconfig('keywordmaps', k, v, 'keyword')
476 476 else:
477 477 ui.status(_('\n\tconfiguration using current keyword template maps\n'))
478 478 if uikwmaps:
479 479 kwmaps = dict(uikwmaps)
480 480 else:
481 481 kwmaps = _defaultkwmaps(ui)
482 482
483 483 uisetup(ui)
484 484 reposetup(ui, repo)
485 485 ui.write(('[extensions]\nkeyword =\n'))
486 486 demoitems('keyword', ui.configitems('keyword'))
487 487 demoitems('keywordset', ui.configitems('keywordset'))
488 488 demoitems('keywordmaps', kwmaps.iteritems())
489 489 keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
490 490 repo.wvfs.write(fn, keywords)
491 491 repo[None].add([fn])
492 492 ui.note(_('\nkeywords written to %s:\n') % fn)
493 493 ui.note(keywords)
494 494 with repo.wlock():
495 495 repo.dirstate.setbranch('demobranch')
496 496 for name, cmd in ui.configitems('hooks'):
497 497 if name.split('.', 1)[0].find('commit') > -1:
498 498 repo.ui.setconfig('hooks', name, '', 'keyword')
499 499 msg = _('hg keyword configuration and expansion example')
500 500 ui.note(("hg ci -m '%s'\n" % msg))
501 501 repo.commit(text=msg)
502 502 ui.status(_('\n\tkeywords expanded\n'))
503 503 ui.write(repo.wread(fn))
504 504 repo.wvfs.rmtree(repo.root)
505 505
506 506 @command('kwexpand',
507 507 cmdutil.walkopts,
508 508 _('hg kwexpand [OPTION]... [FILE]...'),
509 509 inferrepo=True)
510 510 def expand(ui, repo, *pats, **opts):
511 511 '''expand keywords in the working directory
512 512
513 513 Run after (re)enabling keyword expansion.
514 514
515 515 kwexpand refuses to run if given files contain local changes.
516 516 '''
517 517 # 3rd argument sets expansion to True
518 518 _kwfwrite(ui, repo, True, *pats, **opts)
519 519
520 520 @command('kwfiles',
521 521 [('A', 'all', None, _('show keyword status flags of all files')),
522 522 ('i', 'ignore', None, _('show files excluded from expansion')),
523 523 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
524 524 ] + cmdutil.walkopts,
525 525 _('hg kwfiles [OPTION]... [FILE]...'),
526 526 inferrepo=True)
527 527 def files(ui, repo, *pats, **opts):
528 528 '''show files configured for keyword expansion
529 529
530 530 List which files in the working directory are matched by the
531 531 [keyword] configuration patterns.
532 532
533 533 Useful to prevent inadvertent keyword expansion and to speed up
534 534 execution by including only files that are actual candidates for
535 535 expansion.
536 536
537 537 See :hg:`help keyword` on how to construct patterns both for
538 538 inclusion and exclusion of files.
539 539
540 540 With -A/--all and -v/--verbose the codes used to show the status
541 541 of files are::
542 542
543 543 K = keyword expansion candidate
544 544 k = keyword expansion candidate (not tracked)
545 545 I = ignored
546 546 i = ignored (not tracked)
547 547 '''
548 548 kwt = getattr(repo, '_keywordkwt', None)
549 549 wctx = repo[None]
550 550 status = _status(ui, repo, wctx, kwt, *pats, **opts)
551 551 if pats:
552 552 cwd = repo.getcwd()
553 553 else:
554 554 cwd = ''
555 555 files = []
556 556 opts = pycompat.byteskwargs(opts)
557 557 if not opts.get('unknown') or opts.get('all'):
558 558 files = sorted(status.modified + status.added + status.clean)
559 559 kwfiles = kwt.iskwfile(files, wctx)
560 560 kwdeleted = kwt.iskwfile(status.deleted, wctx)
561 561 kwunknown = kwt.iskwfile(status.unknown, wctx)
562 562 if not opts.get('ignore') or opts.get('all'):
563 563 showfiles = kwfiles, kwdeleted, kwunknown
564 564 else:
565 565 showfiles = [], [], []
566 566 if opts.get('all') or opts.get('ignore'):
567 567 showfiles += ([f for f in files if f not in kwfiles],
568 568 [f for f in status.unknown if f not in kwunknown])
569 569 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
570 570 kwstates = zip(kwlabels, pycompat.bytestr('K!kIi'), showfiles)
571 571 fm = ui.formatter('kwfiles', opts)
572 572 fmt = '%.0s%s\n'
573 573 if opts.get('all') or ui.verbose:
574 574 fmt = '%s %s\n'
575 575 for kwstate, char, filenames in kwstates:
576 576 label = 'kwfiles.' + kwstate
577 577 for f in filenames:
578 578 fm.startitem()
579 579 fm.data(kwstatus=char, path=f)
580 580 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
581 581 fm.end()
582 582
583 583 @command('kwshrink',
584 584 cmdutil.walkopts,
585 585 _('hg kwshrink [OPTION]... [FILE]...'),
586 586 inferrepo=True)
587 587 def shrink(ui, repo, *pats, **opts):
588 588 '''revert expanded keywords in the working directory
589 589
590 590 Must be run before changing/disabling active keywords.
591 591
592 592 kwshrink refuses to run if given files contain local changes.
593 593 '''
594 594 # 3rd argument sets expansion to False
595 595 _kwfwrite(ui, repo, False, *pats, **opts)
596 596
597 597 # monkeypatches
598 598
599 599 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
600 600 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
601 601 rejects or conflicts due to expanded keywords in working dir.'''
602 602 orig(self, ui, gp, backend, store, eolmode)
603 603 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
604 604 if kwt:
605 605 # shrink keywords read from working dir
606 606 self.lines = kwt.shrinklines(self.fname, self.lines)
607 607
608 608 def kwdiff(orig, repo, *args, **kwargs):
609 609 '''Monkeypatch patch.diff to avoid expansion.'''
610 610 kwt = getattr(repo, '_keywordkwt', None)
611 611 if kwt:
612 612 restrict = kwt.restrict
613 613 kwt.restrict = True
614 614 try:
615 615 for chunk in orig(repo, *args, **kwargs):
616 616 yield chunk
617 617 finally:
618 618 if kwt:
619 619 kwt.restrict = restrict
620 620
621 621 def kwweb_skip(orig, web):
622 622 '''Wraps webcommands.x turning off keyword expansion.'''
623 623 kwt = getattr(web.repo, '_keywordkwt', None)
624 624 if kwt:
625 625 origmatch = kwt.match
626 626 kwt.match = util.never
627 627 try:
628 628 for chunk in orig(web):
629 629 yield chunk
630 630 finally:
631 631 if kwt:
632 632 kwt.match = origmatch
633 633
634 634 def kw_amend(orig, ui, repo, old, extra, pats, opts):
635 635 '''Wraps cmdutil.amend expanding keywords after amend.'''
636 636 kwt = getattr(repo, '_keywordkwt', None)
637 637 if kwt is None:
638 638 return orig(ui, repo, old, extra, pats, opts)
639 639 with repo.wlock():
640 640 kwt.postcommit = True
641 641 newid = orig(ui, repo, old, extra, pats, opts)
642 642 if newid != old.node():
643 643 ctx = repo[newid]
644 644 kwt.restrict = True
645 645 kwt.overwrite(ctx, ctx.files(), False, True)
646 646 kwt.restrict = False
647 647 return newid
648 648
649 649 def kw_copy(orig, ui, repo, pats, opts, rename=False):
650 650 '''Wraps cmdutil.copy so that copy/rename destinations do not
651 651 contain expanded keywords.
652 652 Note that the source of a regular file destination may also be a
653 653 symlink:
654 654 hg cp sym x -> x is symlink
655 655 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
656 656 For the latter we have to follow the symlink to find out whether its
657 657 target is configured for expansion and we therefore must unexpand the
658 658 keywords in the destination.'''
659 659 kwt = getattr(repo, '_keywordkwt', None)
660 660 if kwt is None:
661 661 return orig(ui, repo, pats, opts, rename)
662 662 with repo.wlock():
663 663 orig(ui, repo, pats, opts, rename)
664 664 if opts.get('dry_run'):
665 665 return
666 666 wctx = repo[None]
667 667 cwd = repo.getcwd()
668 668
669 669 def haskwsource(dest):
670 670 '''Returns true if dest is a regular file and configured for
671 671 expansion or a symlink which points to a file configured for
672 672 expansion. '''
673 673 source = repo.dirstate.copied(dest)
674 674 if 'l' in wctx.flags(source):
675 675 source = pathutil.canonpath(repo.root, cwd,
676 676 os.path.realpath(source))
677 677 return kwt.match(source)
678 678
679 679 candidates = [f for f in repo.dirstate.copies() if
680 680 'l' not in wctx.flags(f) and haskwsource(f)]
681 681 kwt.overwrite(wctx, candidates, False, False)
682 682
683 683 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
684 684 '''Wraps record.dorecord expanding keywords after recording.'''
685 685 kwt = getattr(repo, '_keywordkwt', None)
686 686 if kwt is None:
687 687 return orig(ui, repo, commitfunc, *pats, **opts)
688 688 with repo.wlock():
689 689 # record returns 0 even when nothing has changed
690 690 # therefore compare nodes before and after
691 691 kwt.postcommit = True
692 692 ctx = repo['.']
693 693 wstatus = ctx.status()
694 694 ret = orig(ui, repo, commitfunc, *pats, **opts)
695 695 recctx = repo['.']
696 696 if ctx != recctx:
697 697 modified, added = _preselect(wstatus, recctx.files())
698 698 kwt.restrict = False
699 699 kwt.overwrite(recctx, modified, False, True)
700 700 kwt.overwrite(recctx, added, False, True, True)
701 701 kwt.restrict = True
702 702 return ret
703 703
704 704 def kwfilectx_cmp(orig, self, fctx):
705 705 if fctx._customcmp:
706 706 return fctx.cmp(self)
707 707 kwt = getattr(self._repo, '_keywordkwt', None)
708 708 if kwt is None:
709 709 return orig(self, fctx)
710 710 # keyword affects data size, comparing wdir and filelog size does
711 711 # not make sense
712 712 if (fctx._filenode is None and
713 713 (self._repo._encodefilterpats or
714 714 kwt.match(fctx.path()) and 'l' not in fctx.flags() or
715 715 self.size() - 4 == fctx.size()) or
716 716 self.size() == fctx.size()):
717 717 return self._filelog.cmp(self._filenode, fctx.data())
718 718 return True
719 719
720 720 def uisetup(ui):
721 721 ''' Monkeypatches dispatch._parse to retrieve user command.
722 722 Overrides file method to return kwfilelog instead of filelog
723 723 if file matches user configuration.
724 724 Wraps commit to overwrite configured files with updated
725 725 keyword substitutions.
726 726 Monkeypatches patch and webcommands.'''
727 727
728 728 def kwdispatch_parse(orig, ui, args):
729 729 '''Monkeypatch dispatch._parse to obtain running hg command.'''
730 730 cmd, func, args, options, cmdoptions = orig(ui, args)
731 731 kwtools['hgcmd'] = cmd
732 732 return cmd, func, args, options, cmdoptions
733 733
734 734 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
735 735
736 736 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
737 737 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
738 738 extensions.wrapfunction(patch, 'diff', kwdiff)
739 739 extensions.wrapfunction(cmdutil, 'amend', kw_amend)
740 740 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
741 741 extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
742 742 for c in nokwwebcommands.split():
743 743 extensions.wrapfunction(webcommands, c, kwweb_skip)
744 744
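``extensions.wrapfunction`` is the workhorse here: it replaces an attribute
with a wrapper that receives the original callable as its first argument. A
stripped-down sketch of the same pattern ``kwdiff`` uses above::

    from mercurial import extensions, patch

    def mydiff(orig, repo, *args, **kwargs):
        # adjust state here, then delegate to the wrapped function
        for chunk in orig(repo, *args, **kwargs):
            yield chunk

    def uisetup(ui):
        extensions.wrapfunction(patch, 'diff', mydiff)
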
745 745 def reposetup(ui, repo):
746 746 '''Sets up repo as kwrepo for keyword substitution.'''
747 747
748 748 try:
749 749 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
750 750 or '.hg' in util.splitpath(repo.root)
751 751 or repo._url.startswith('bundle:')):
752 752 return
753 753 except AttributeError:
754 754 pass
755 755
756 756 inc, exc = [], ['.hg*']
757 757 for pat, opt in ui.configitems('keyword'):
758 758 if opt != 'ignore':
759 759 inc.append(pat)
760 760 else:
761 761 exc.append(pat)
762 762 if not inc:
763 763 return
764 764
765 765 kwt = kwtemplater(ui, repo, inc, exc)
766 766
767 767 class kwrepo(repo.__class__):
768 768 def file(self, f):
769 769 if f[0] == '/':
770 770 f = f[1:]
771 771 return kwfilelog(self.svfs, kwt, f)
772 772
773 773 def wread(self, filename):
774 774 data = super(kwrepo, self).wread(filename)
775 775 return kwt.wread(filename, data)
776 776
777 777 def commit(self, *args, **opts):
778 778 # use custom commitctx for user commands
779 779 # other extensions can still wrap repo.commitctx directly
780 780 self.commitctx = self.kwcommitctx
781 781 try:
782 782 return super(kwrepo, self).commit(*args, **opts)
783 783 finally:
784 784 del self.commitctx
785 785
786 786 def kwcommitctx(self, ctx, error=False):
787 787 n = super(kwrepo, self).commitctx(ctx, error)
788 788 # no lock needed, only called from repo.commit() which already locks
789 789 if not kwt.postcommit:
790 790 restrict = kwt.restrict
791 791 kwt.restrict = True
792 792 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
793 793 False, True)
794 794 kwt.restrict = restrict
795 795 return n
796 796
797 797 def rollback(self, dryrun=False, force=False):
798 798 with self.wlock():
799 799 origrestrict = kwt.restrict
800 800 try:
801 801 if not dryrun:
802 802 changed = self['.'].files()
803 803 ret = super(kwrepo, self).rollback(dryrun, force)
804 804 if not dryrun:
805 805 ctx = self['.']
806 806 modified, added = _preselect(ctx.status(), changed)
807 807 kwt.restrict = False
808 808 kwt.overwrite(ctx, modified, True, True)
809 809 kwt.overwrite(ctx, added, True, False)
810 810 return ret
811 811 finally:
812 812 kwt.restrict = origrestrict
813 813
814 814 repo.__class__ = kwrepo
815 815 repo._keywordkwt = kwt
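
``reposetup`` ends with Mercurial's standard dynamic-subclassing idiom:
derive from the instance's *current* class so wrappers installed by other
extensions stack, then swap ``__class__`` in place. The skeleton of the
idiom::

    def reposetup(ui, repo):
        if not repo.local():
            return

        class myrepo(repo.__class__):
            def commit(self, *args, **opts):
                # custom behaviour around the inherited implementation
                return super(myrepo, self).commit(*args, **opts)

        repo.__class__ = myrepo
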
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
@@ -1,2449 +1,2472 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 from .revlogutils import (
74 74 constants as revlogconst,
75 75 )
76 76
77 77 release = lockmod.release
78 78 urlerr = util.urlerr
79 79 urlreq = util.urlreq
80 80
81 81 # set of (path, vfs-location) tuples. vfs-location is:
82 82 # - 'plain' for vfs relative paths
83 83 # - '' for svfs relative paths
84 84 _cachedfiles = set()
85 85
86 86 class _basefilecache(scmutil.filecache):
87 87 """All filecache usage on repo is done for logic that should be unfiltered
88 88 """
89 89 def __get__(self, repo, type=None):
90 90 if repo is None:
91 91 return self
92 92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 93 def __set__(self, repo, value):
94 94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 95 def __delete__(self, repo):
96 96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 97
98 98 class repofilecache(_basefilecache):
99 99 """filecache for files in .hg but outside of .hg/store"""
100 100 def __init__(self, *paths):
101 101 super(repofilecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, 'plain'))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.vfs.join(fname)
107 107
108 108 class storecache(_basefilecache):
109 109 """filecache for files in the store"""
110 110 def __init__(self, *paths):
111 111 super(storecache, self).__init__(*paths)
112 112 for path in paths:
113 113 _cachedfiles.add((path, ''))
114 114
115 115 def join(self, obj, fname):
116 116 return obj.sjoin(fname)
117 117
118 118 def isfilecached(repo, name):
119 119 """check if a repo has already cached the "name" filecache-ed property
120 120
121 121 This returns (cachedobj-or-None, iscached) tuple.
122 122 """
123 123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 124 if not cacheentry:
125 125 return None, False
126 126 return cacheentry.obj, True
127 127
128 128 class unfilteredpropertycache(util.propertycache):
129 129 """propertycache that applies to the unfiltered repo only"""
130 130
131 131 def __get__(self, repo, type=None):
132 132 unfi = repo.unfiltered()
133 133 if unfi is repo:
134 134 return super(unfilteredpropertycache, self).__get__(unfi)
135 135 return getattr(unfi, self.name)
136 136
137 137 class filteredpropertycache(util.propertycache):
138 138 """propertycache that must take filtering into account"""
139 139
140 140 def cachevalue(self, obj, value):
141 141 object.__setattr__(obj, self.name, value)
142 142
143 143
144 144 def hasunfilteredcache(repo, name):
145 145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 146 return name in vars(repo.unfiltered())
147 147
148 148 def unfilteredmethod(orig):
149 149 """decorate a method that always needs to be run on the unfiltered version"""
150 150 def wrapper(repo, *args, **kwargs):
151 151 return orig(repo.unfiltered(), *args, **kwargs)
152 152 return wrapper
153 153
154 154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 155 'unbundle'}
156 156 legacycaps = moderncaps.union({'changegroupsubset'})
157 157
158 158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 159 class localcommandexecutor(object):
160 160 def __init__(self, peer):
161 161 self._peer = peer
162 162 self._sent = False
163 163 self._closed = False
164 164
165 165 def __enter__(self):
166 166 return self
167 167
168 168 def __exit__(self, exctype, excvalue, exctb):
169 169 self.close()
170 170
171 171 def callcommand(self, command, args):
172 172 if self._sent:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'sendcommands()')
175 175
176 176 if self._closed:
177 177 raise error.ProgrammingError('callcommand() cannot be used after '
178 178 'close()')
179 179
180 180 # We don't need to support anything fancy. Just call the named
181 181 # method on the peer and return a resolved future.
182 182 fn = getattr(self._peer, pycompat.sysstr(command))
183 183
184 184 f = pycompat.futures.Future()
185 185
186 186 try:
187 187 result = fn(**pycompat.strkwargs(args))
188 188 except Exception:
189 189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 190 else:
191 191 f.set_result(result)
192 192
193 193 return f
194 194
195 195 def sendcommands(self):
196 196 self._sent = True
197 197
198 198 def close(self):
199 199 self._closed = True
200 200
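``localcommandexecutor`` is the executor half of the peer command interface:
callers enqueue commands with ``callcommand()`` and get futures back. For a
local peer the future resolves immediately; typical usage looks like this
(command and arguments are illustrative)::

    with repo.peer().commandexecutor() as executor:
        future = executor.callcommand(b'lookup', {b'key': b'tip'})

    node = future.result()  # already resolved for a local peer
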
201 201 @interfaceutil.implementer(repository.ipeercommands)
202 202 class localpeer(repository.peer):
203 203 '''peer for a local repo; reflects only the most recent API'''
204 204
205 205 def __init__(self, repo, caps=None):
206 206 super(localpeer, self).__init__()
207 207
208 208 if caps is None:
209 209 caps = moderncaps.copy()
210 210 self._repo = repo.filtered('served')
211 211 self.ui = repo.ui
212 212 self._caps = repo._restrictcapabilities(caps)
213 213
214 214 # Begin of _basepeer interface.
215 215
216 216 def url(self):
217 217 return self._repo.url()
218 218
219 219 def local(self):
220 220 return self._repo
221 221
222 222 def peer(self):
223 223 return self
224 224
225 225 def canpush(self):
226 226 return True
227 227
228 228 def close(self):
229 229 self._repo.close()
230 230
231 231 # End of _basepeer interface.
232 232
233 233 # Begin of _basewirecommands interface.
234 234
235 235 def branchmap(self):
236 236 return self._repo.branchmap()
237 237
238 238 def capabilities(self):
239 239 return self._caps
240 240
241 241 def clonebundles(self):
242 242 return self._repo.tryread('clonebundles.manifest')
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 """Used to test argument passing over the wire"""
246 246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 247 pycompat.bytestr(four),
248 248 pycompat.bytestr(five))
249 249
250 250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 251 **kwargs):
252 252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 253 common=common, bundlecaps=bundlecaps,
254 254 **kwargs)[1]
255 255 cb = util.chunkbuffer(chunks)
256 256
257 257 if exchange.bundle2requested(bundlecaps):
258 258 # When requesting a bundle2, getbundle returns a stream to make the
259 259 # wire level function happier. We need to build a proper object
260 260 # from it in local peer.
261 261 return bundle2.getunbundler(self.ui, cb)
262 262 else:
263 263 return changegroup.getunbundler('01', cb, None)
264 264
265 265 def heads(self):
266 266 return self._repo.heads()
267 267
268 268 def known(self, nodes):
269 269 return self._repo.known(nodes)
270 270
271 271 def listkeys(self, namespace):
272 272 return self._repo.listkeys(namespace)
273 273
274 274 def lookup(self, key):
275 275 return self._repo.lookup(key)
276 276
277 277 def pushkey(self, namespace, key, old, new):
278 278 return self._repo.pushkey(namespace, key, old, new)
279 279
280 280 def stream_out(self):
281 281 raise error.Abort(_('cannot perform stream clone against local '
282 282 'peer'))
283 283
284 284 def unbundle(self, bundle, heads, url):
285 285 """apply a bundle on a repo
286 286
287 287 This function handles the repo locking itself."""
288 288 try:
289 289 try:
290 290 bundle = exchange.readbundle(self.ui, bundle, None)
291 291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 292 if util.safehasattr(ret, 'getchunks'):
293 293 # This is a bundle20 object, turn it into an unbundler.
294 294 # This little dance should be dropped eventually when the
295 295 # API is finally improved.
296 296 stream = util.chunkbuffer(ret.getchunks())
297 297 ret = bundle2.getunbundler(self.ui, stream)
298 298 return ret
299 299 except Exception as exc:
300 300 # If the exception contains output salvaged from a bundle2
301 301 # reply, we need to make sure it is printed before continuing
302 302 # to fail. So we build a bundle2 with such output and consume
303 303 # it directly.
304 304 #
305 305 # This is not very elegant but allows a "simple" solution for
306 306 # issue4594
307 307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 308 if output:
309 309 bundler = bundle2.bundle20(self._repo.ui)
310 310 for out in output:
311 311 bundler.addpart(out)
312 312 stream = util.chunkbuffer(bundler.getchunks())
313 313 b = bundle2.getunbundler(self.ui, stream)
314 314 bundle2.processbundle(self._repo, b)
315 315 raise
316 316 except error.PushRaced as exc:
317 317 raise error.ResponseError(_('push failed:'),
318 318 stringutil.forcebytestr(exc))
319 319
320 320 # End of _basewirecommands interface.
321 321
322 322 # Begin of peer interface.
323 323
324 324 def commandexecutor(self):
325 325 return localcommandexecutor(self)
326 326
327 327 # End of peer interface.
328 328
329 329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 330 class locallegacypeer(localpeer):
331 331 '''peer extension which implements legacy methods too; used for tests with
332 332 restricted capabilities'''
333 333
334 334 def __init__(self, repo):
335 335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 336
337 337 # Begin of baselegacywirecommands interface.
338 338
339 339 def between(self, pairs):
340 340 return self._repo.between(pairs)
341 341
342 342 def branches(self, nodes):
343 343 return self._repo.branches(nodes)
344 344
345 345 def changegroup(self, nodes, source):
346 346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 347 missingheads=self._repo.heads())
348 348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 349
350 350 def changegroupsubset(self, bases, heads, source):
351 351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 352 missingheads=heads)
353 353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 354
355 355 # End of baselegacywirecommands interface.
356 356
357 357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 358 # clients.
359 359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 360
361 361 # A repository with the sparserevlog feature will have delta chains that
362 362 # can spread over a larger span. Sparse reading cuts these large spans into
363 363 # pieces, so that each piece isn't too big.
364 364 # Without the sparserevlog capability, reading from the repository could use
365 365 # huge amounts of memory, because the whole span would be read at once,
366 366 # including all the intermediate revisions that aren't pertinent for the chain.
367 367 # This is why once a repository has enabled sparse-read, it becomes required.
368 368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369 369
370 370 # Functions receiving (ui, features) that extensions can register to impact
371 371 # the ability to load repositories with custom requirements. Only
372 372 # functions defined in loaded extensions are called.
373 373 #
374 374 # The function receives a set of requirement strings that the repository
375 375 # is capable of opening. Functions will typically add elements to the
376 376 # set to reflect that the extension knows how to handle those requirements.
377 377 featuresetupfuncs = set()
378 378
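An extension opts repositories into a custom requirement by adding a callback
to this set; note the ``extmods`` check in ``__init__`` below, which ignores
callbacks whose defining module is not a loaded extension. A hypothetical
extension would register like so::

    # in a hypothetical extension module
    from mercurial import localrepo

    def featuresetup(ui, supported):
        # declare that repositories with this requirement can be opened
        supported.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)
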
379 379 @interfaceutil.implementer(repository.completelocalrepository)
380 380 class localrepository(object):
381 381
382 382 # obsolete experimental requirements:
383 383 # - manifestv2: An experimental new manifest format that allowed
384 384 # for stem compression of long paths. Experiment ended up not
385 385 # being successful (repository sizes went up due to worse delta
386 386 # chains), and the code was deleted in 4.6.
387 387 supportedformats = {
388 388 'revlogv1',
389 389 'generaldelta',
390 390 'treemanifest',
391 391 REVLOGV2_REQUIREMENT,
392 392 SPARSEREVLOG_REQUIREMENT,
393 393 }
394 394 _basesupported = supportedformats | {
395 395 'store',
396 396 'fncache',
397 397 'shared',
398 398 'relshared',
399 399 'dotencode',
400 400 'exp-sparse',
401 401 'internal-phase'
402 402 }
403 403 openerreqs = {
404 404 'revlogv1',
405 405 'generaldelta',
406 406 'treemanifest',
407 407 }
408 408
409 409 # list of prefixes for files which can be written without 'wlock'
410 410 # Extensions should extend this list when needed
411 411 _wlockfreeprefix = {
412 412 # We might consider requiring 'wlock' for the next
413 413 # two, but pretty much all the existing code assumes
414 414 # wlock is not needed so we keep them excluded for
415 415 # now.
416 416 'hgrc',
417 417 'requires',
418 418 # XXX cache is a complicated business; someone
419 419 # should investigate this in depth at some point
420 420 'cache/',
421 421 # XXX shouldn't dirstate be covered by the wlock?
422 422 'dirstate',
423 423 # XXX bisect was still a bit too messy at the time
424 424 # this changeset was introduced. Someone should fix
425 425 # the remaining bit and drop this line
426 426 'bisect.state',
427 427 }
428 428
429 def __init__(self, baseui, path, create=False, intents=None):
429 def __init__(self, baseui, path, intents=None):
430 """Create a new local repository instance.
431
432 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
433 for obtaining a new repository object.
434 """
435
430 436 self.requirements = set()
431 437 self.filtername = None
432 438 # wvfs: rooted at the repository root, used to access the working copy
433 439 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
434 440 # vfs: rooted at .hg, used to access repo files outside of .hg/store
435 441 self.vfs = None
436 442 # svfs: usually rooted at .hg/store, used to access repository history
437 443 # If this is a shared repository, this vfs may point to another
438 444 # repository's .hg/store directory.
439 445 self.svfs = None
440 446 self.root = self.wvfs.base
441 447 self.path = self.wvfs.join(".hg")
442 448 self.origroot = path
443 449 self.baseui = baseui
444 450 self.ui = baseui.copy()
445 451 self.ui.copy = baseui.copy # prevent copying repo configuration
446 452 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
447 453 if (self.ui.configbool('devel', 'all-warnings') or
448 454 self.ui.configbool('devel', 'check-locks')):
449 455 self.vfs.audit = self._getvfsward(self.vfs.audit)
450 456 # A list of callbacks to shape the phase if no data were found.
451 457 # Callbacks are in the form: func(repo, roots) --> processed root.
452 458 # This list is to be filled by extensions during repo setup
453 459 self._phasedefaults = []
454 460 try:
455 461 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
456 462 self._loadextensions()
457 463 except IOError:
458 464 pass
459 465
460 466 if featuresetupfuncs:
461 467 self.supported = set(self._basesupported) # use private copy
462 468 extmods = set(m.__name__ for n, m
463 469 in extensions.extensions(self.ui))
464 470 for setupfunc in featuresetupfuncs:
465 471 if setupfunc.__module__ in extmods:
466 472 setupfunc(self.ui, self.supported)
467 473 else:
468 474 self.supported = self._basesupported
469 475 color.setup(self.ui)
470 476
471 477 # Add compression engines.
472 478 for name in util.compengines:
473 479 engine = util.compengines[name]
474 480 if engine.revlogheader():
475 481 self.supported.add('exp-compression-%s' % name)
476 482
477 483 if not self.vfs.isdir():
478 if create:
479 self.requirements = newreporequirements(self.ui)
480
481 if not self.wvfs.exists():
482 self.wvfs.makedirs()
483 self.vfs.makedir(notindexed=True)
484
485 if 'store' in self.requirements:
486 self.vfs.mkdir("store")
487
488 # create an invalid changelog
489 self.vfs.append(
490 "00changelog.i",
491 '\0\0\0\2' # represents revlogv2
492 ' dummy changelog to prevent using the old repo layout'
493 )
494 else:
495 try:
496 self.vfs.stat()
497 except OSError as inst:
498 if inst.errno != errno.ENOENT:
499 raise
500 raise error.RepoError(_("repository %s not found") % path)
501 elif create:
502 raise error.RepoError(_("repository %s already exists") % path)
484 try:
485 self.vfs.stat()
486 except OSError as inst:
487 if inst.errno != errno.ENOENT:
488 raise
489 raise error.RepoError(_("repository %s not found") % path)
503 490 else:
504 491 try:
505 492 self.requirements = scmutil.readrequires(
506 493 self.vfs, self.supported)
507 494 except IOError as inst:
508 495 if inst.errno != errno.ENOENT:
509 496 raise
510 497
511 498 cachepath = self.vfs.join('cache')
512 499 self.sharedpath = self.path
513 500 try:
514 501 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
515 502 if 'relshared' in self.requirements:
516 503 sharedpath = self.vfs.join(sharedpath)
517 504 vfs = vfsmod.vfs(sharedpath, realpath=True)
518 505 cachepath = vfs.join('cache')
519 506 s = vfs.base
520 507 if not vfs.exists():
521 508 raise error.RepoError(
522 509 _('.hg/sharedpath points to nonexistent directory %s') % s)
523 510 self.sharedpath = s
524 511 except IOError as inst:
525 512 if inst.errno != errno.ENOENT:
526 513 raise
527 514
528 515 if 'exp-sparse' in self.requirements and not sparse.enabled:
529 516 raise error.RepoError(_('repository is using sparse feature but '
530 517 'sparse is not enabled; enable the '
531 518 '"sparse" extension to access'))
532 519
533 520 self.store = store.store(
534 521 self.requirements, self.sharedpath,
535 522 lambda base: vfsmod.vfs(base, cacheaudited=True))
536 523 self.spath = self.store.path
537 524 self.svfs = self.store.vfs
538 525 self.sjoin = self.store.join
539 526 self.vfs.createmode = self.store.createmode
540 527 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
541 528 self.cachevfs.createmode = self.store.createmode
542 529 if (self.ui.configbool('devel', 'all-warnings') or
543 530 self.ui.configbool('devel', 'check-locks')):
544 531 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
545 532 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
546 533 else: # standard vfs
547 534 self.svfs.audit = self._getsvfsward(self.svfs.audit)
548 535 self._applyopenerreqs()
549 if create:
550 self._writerequirements()
551 536
552 537 self._dirstatevalidatewarned = False
553 538
554 539 self._branchcaches = {}
555 540 self._revbranchcache = None
556 541 self._filterpats = {}
557 542 self._datafilters = {}
558 543 self._transref = self._lockref = self._wlockref = None
559 544
560 545 # A cache for various files under .hg/ that tracks file changes,
561 546 # (used by the filecache decorator)
562 547 #
563 548 # Maps a property name to its util.filecacheentry
564 549 self._filecache = {}
565 550
566 551 # hold sets of revision to be filtered
567 552 # should be cleared when something might have changed the filter value:
568 553 # - new changesets,
569 554 # - phase change,
570 555 # - new obsolescence marker,
571 556 # - working directory parent change,
572 557 # - bookmark changes
573 558 self.filteredrevcache = {}
574 559
575 560 # post-dirstate-status hooks
576 561 self._postdsstatus = []
577 562
578 563 # generic mapping between names and nodes
579 564 self.names = namespaces.namespaces()
580 565
581 566 # Key to signature value.
582 567 self._sparsesignaturecache = {}
583 568 # Signature to cached matcher instance.
584 569 self._sparsematchercache = {}
585 570
586 571 def _getvfsward(self, origfunc):
587 572 """build a ward for self.vfs"""
588 573 rref = weakref.ref(self)
589 574 def checkvfs(path, mode=None):
590 575 ret = origfunc(path, mode=mode)
591 576 repo = rref()
592 577 if (repo is None
593 578 or not util.safehasattr(repo, '_wlockref')
594 579 or not util.safehasattr(repo, '_lockref')):
595 580 return
596 581 if mode in (None, 'r', 'rb'):
597 582 return
598 583 if path.startswith(repo.path):
599 584 # truncate name relative to the repository (.hg)
600 585 path = path[len(repo.path) + 1:]
601 586 if path.startswith('cache/'):
602 587 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
603 588 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
604 589 if path.startswith('journal.'):
605 590 # journal is covered by 'lock'
606 591 if repo._currentlock(repo._lockref) is None:
607 592 repo.ui.develwarn('write with no lock: "%s"' % path,
608 593 stacklevel=2, config='check-locks')
609 594 elif repo._currentlock(repo._wlockref) is None:
610 595 # rest of vfs files are covered by 'wlock'
611 596 #
612 597 # exclude special files
613 598 for prefix in self._wlockfreeprefix:
614 599 if path.startswith(prefix):
615 600 return
616 601 repo.ui.develwarn('write with no wlock: "%s"' % path,
617 602 stacklevel=2, config='check-locks')
618 603 return ret
619 604 return checkvfs
620 605
621 606 def _getsvfsward(self, origfunc):
622 607 """build a ward for self.svfs"""
623 608 rref = weakref.ref(self)
624 609 def checksvfs(path, mode=None):
625 610 ret = origfunc(path, mode=mode)
626 611 repo = rref()
627 612 if repo is None or not util.safehasattr(repo, '_lockref'):
628 613 return
629 614 if mode in (None, 'r', 'rb'):
630 615 return
631 616 if path.startswith(repo.sharedpath):
632 617 # truncate name relative to the repository (.hg)
633 618 path = path[len(repo.sharedpath) + 1:]
634 619 if repo._currentlock(repo._lockref) is None:
635 620 repo.ui.develwarn('write with no lock: "%s"' % path,
636 621 stacklevel=3)
637 622 return ret
638 623 return checksvfs
639 624
640 625 def close(self):
641 626 self._writecaches()
642 627
643 628 def _loadextensions(self):
644 629 extensions.loadall(self.ui)
645 630
646 631 def _writecaches(self):
647 632 if self._revbranchcache:
648 633 self._revbranchcache.write()
649 634
650 635 def _restrictcapabilities(self, caps):
651 636 if self.ui.configbool('experimental', 'bundle2-advertise'):
652 637 caps = set(caps)
653 638 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
654 639 role='client'))
655 640 caps.add('bundle2=' + urlreq.quote(capsblob))
656 641 return caps
657 642
658 643 def _applyopenerreqs(self):
659 644 self.svfs.options = dict((r, 1) for r in self.requirements
660 645 if r in self.openerreqs)
661 646 # experimental config: format.chunkcachesize
662 647 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
663 648 if chunkcachesize is not None:
664 649 self.svfs.options['chunkcachesize'] = chunkcachesize
665 650 # experimental config: format.manifestcachesize
666 651 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
667 652 if manifestcachesize is not None:
668 653 self.svfs.options['manifestcachesize'] = manifestcachesize
669 654 deltabothparents = self.ui.configbool('storage',
670 655 'revlog.optimize-delta-parent-choice')
671 656 self.svfs.options['deltabothparents'] = deltabothparents
672 657 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
673 658 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
674 659 if 0 <= chainspan:
675 660 self.svfs.options['maxdeltachainspan'] = chainspan
676 661 mmapindexthreshold = self.ui.configbytes('experimental',
677 662 'mmapindexthreshold')
678 663 if mmapindexthreshold is not None:
679 664 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
680 665 withsparseread = self.ui.configbool('experimental', 'sparse-read')
681 666 srdensitythres = float(self.ui.config('experimental',
682 667 'sparse-read.density-threshold'))
683 668 srmingapsize = self.ui.configbytes('experimental',
684 669 'sparse-read.min-gap-size')
685 670 self.svfs.options['with-sparse-read'] = withsparseread
686 671 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
687 672 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
688 673 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
689 674 self.svfs.options['sparse-revlog'] = sparserevlog
690 675 if sparserevlog:
691 676 self.svfs.options['generaldelta'] = True
692 677 maxchainlen = None
693 678 if sparserevlog:
694 679 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
695 680 # experimental config: format.maxchainlen
696 681 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
697 682 if maxchainlen is not None:
698 683 self.svfs.options['maxchainlen'] = maxchainlen
699 684
700 685 for r in self.requirements:
701 686 if r.startswith('exp-compression-'):
702 687 self.svfs.options['compengine'] = r[len('exp-compression-'):]
703 688
704 689 # TODO move "revlogv2" to openerreqs once finalized.
705 690 if REVLOGV2_REQUIREMENT in self.requirements:
706 691 self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle: self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
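
    # Sketch of the prefix walk above: for subpath 'a/b/c' with 'a/b' in
    # ctx.substate, parts shrinks 'a/b/c' -> 'a/b', which matches, and since
    # it is not the full path the check is delegated to the subrepo:
    #
    #   sub = ctx.sub('a/b')
    #   sub.checknested('c')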

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
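
    # View sketch (names are the standard repoview filters): each call
    # returns a lightweight proxy class over the same store.
    #
    #   repo.filtered('visible')   # hide hidden (e.g. obsolete) changesets
    #   repo.filtered('served')    # what is advertised to peers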

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

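    # Lookup forms accepted above (sketch):
    #
    #   repo[None]    # workingctx for the working directory
    #   repo[0]       # changectx by revision number
    #   repo[0:3]     # list of changectx, filtered revisions skipped
    #
    # Symbolic names are better resolved via scmutil.revsymbol(repo, 'tip').
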
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
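
    # %-escaping sketch: values are quoted by formatspec rather than pasted
    # into the revset as raw text.
    #
    #   repo.revs('ancestors(%s) and not %ld', 'tip', [0, 1, 2])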

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
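
    # Usage sketch for the revset entry points above (the alias name 'mine'
    # is illustrative, not a real alias):
    #
    #   for ctx in repo.set('draft()'):
    #       repo.ui.write('%s\n' % ctx)
    #   repo.anyrevs(['mine'], user=True,
    #                localalias={'mine': 'draft()'})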

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
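
    # Sketch: the returned cache maps branch names to their head nodes, e.g.
    #
    #   heads = repo.branchmap().get('default', [])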

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
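
    # The patterns above come from the [encode]/[decode] hgrc sections; an
    # illustrative filter pair (the commands are examples only):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip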

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes with
        # performance impacts. The current code runs more often than needed and
        # does not use caches as much as it could. The current focus is on the
        # behavior of the feature so we disable it by default. The flag will be
        # removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
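        #
        # A hypothetical hook-side reader for that file (sketch only; the
        # name readtagchanges is not part of this module):
        #
        #   def readtagchanges(repo):
        #       with repo.vfs('changes/tags.changes') as fp:
        #           for line in fp:
        #               action, node, name = line.rstrip('\n').split(' ', 2)
        #               yield action, node, name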
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
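
    # Calling pattern sketch: a transaction must run under the store lock,
    # and the caller is responsible for close()/release().
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...          # write to the store
    #           tr.close()   # commit the transaction
    #       finally:
    #           tr.release() # abort if close() was never reached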

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
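
    # Sketch: commit() below uses this to fire its 'commit' hook only once
    # all locks are gone; a caller could do the same, e.g.
    #
    #   repo._afterlock(lambda: repo.ui.note('locks released\n'))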

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
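
    # Acquisition-order sketch: take wlock before lock, never the reverse
    # (the devel warning above enforces this).
    #
    #   with repo.wlock(), repo.lock():
    #       ...   # safe to touch both working copy and store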

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may already be stripped when the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
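
    # Caller sketch: commit all working directory changes; None means
    # nothing was committed.
    #
    #   node = repo.commit(text='update docs',
    #                      user='alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')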
2053 2038
2054 2039 @unfilteredmethod
2055 2040 def commitctx(self, ctx, error=False):
2056 2041 """Add a new revision to current repository.
2057 2042 Revision information is passed via the context argument.
2058 2043
2059 2044 ctx.files() should list all files involved in this commit, i.e.
2060 2045 modified/added/removed files. On merge, it may be wider than the
2061 2046 ctx.files() to be committed, since any file nodes derived directly
2062 2047 from p1 or p2 are excluded from the committed ctx.files().
2063 2048 """
2064 2049
2065 2050 tr = None
2066 2051 p1, p2 = ctx.p1(), ctx.p2()
2067 2052 user = ctx.user()
2068 2053
2069 2054 lock = self.lock()
2070 2055 try:
2071 2056 tr = self.transaction("commit")
2072 2057 trp = weakref.proxy(tr)
2073 2058
2074 2059 if ctx.manifestnode():
2075 2060 # reuse an existing manifest revision
2076 2061 self.ui.debug('reusing known manifest\n')
2077 2062 mn = ctx.manifestnode()
2078 2063 files = ctx.files()
2079 2064 elif ctx.files():
2080 2065 m1ctx = p1.manifestctx()
2081 2066 m2ctx = p2.manifestctx()
2082 2067 mctx = m1ctx.copy()
2083 2068
2084 2069 m = mctx.read()
2085 2070 m1 = m1ctx.read()
2086 2071 m2 = m2ctx.read()
2087 2072
2088 2073 # check in files
2089 2074 added = []
2090 2075 changed = []
2091 2076 removed = list(ctx.removed())
2092 2077 linkrev = len(self)
2093 2078 self.ui.note(_("committing files:\n"))
2094 2079 for f in sorted(ctx.modified() + ctx.added()):
2095 2080 self.ui.note(f + "\n")
2096 2081 try:
2097 2082 fctx = ctx[f]
2098 2083 if fctx is None:
2099 2084 removed.append(f)
2100 2085 else:
2101 2086 added.append(f)
2102 2087 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2103 2088 trp, changed)
2104 2089 m.setflag(f, fctx.flags())
2105 2090 except OSError as inst:
2106 2091 self.ui.warn(_("trouble committing %s!\n") % f)
2107 2092 raise
2108 2093 except IOError as inst:
2109 2094 errcode = getattr(inst, 'errno', errno.ENOENT)
2110 2095 if error or errcode and errcode != errno.ENOENT:
2111 2096 self.ui.warn(_("trouble committing %s!\n") % f)
2112 2097 raise
2113 2098
2114 2099 # update manifest
2115 2100 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2116 2101 drop = [f for f in removed if f in m]
2117 2102 for f in drop:
2118 2103 del m[f]
2119 2104 files = changed + removed
2120 2105 md = None
2121 2106 if not files:
2122 2107 # if no "files" actually changed in terms of the changelog,
2123 2108 # try hard to detect unmodified manifest entry so that the
2124 2109 # exact same commit can be reproduced later on convert.
2125 2110 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2126 2111 if not files and md:
2127 2112 self.ui.debug('not reusing manifest (no file change in '
2128 2113 'changelog, but manifest differs)\n')
2129 2114 if files or md:
2130 2115 self.ui.note(_("committing manifest\n"))
2131 2116 mn = mctx.write(trp, linkrev,
2132 2117 p1.manifestnode(), p2.manifestnode(),
2133 2118 added, drop)
2134 2119 else:
2135 2120 self.ui.debug('reusing manifest form p1 (listed files '
2136 2121 'actually unchanged)\n')
2137 2122 mn = p1.manifestnode()
2138 2123 else:
2139 2124 self.ui.debug('reusing manifest from p1 (no file change)\n')
2140 2125 mn = p1.manifestnode()
2141 2126 files = []
2142 2127
2143 2128 # update changelog
2144 2129 self.ui.note(_("committing changelog\n"))
2145 2130 self.changelog.delayupdate(tr)
2146 2131 n = self.changelog.add(mn, files, ctx.description(),
2147 2132 trp, p1.node(), p2.node(),
2148 2133 user, ctx.date(), ctx.extra().copy())
2149 2134 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2150 2135 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2151 2136 parent2=xp2)
2152 2137 # set the new commit in its proper phase
2153 2138 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2154 2139 if targetphase:
2155 2140 # retracting the boundary does not alter parent changesets:
2156 2141 # if a parent has a higher phase, the resulting phase will
2157 2142 # be compliant anyway
2158 2143 #
2159 2144 # if the minimal phase is 0, we don't need to retract anything
2160 2145 phases.registernew(self, tr, targetphase, [n])
2161 2146 tr.close()
2162 2147 return n
2163 2148 finally:
2164 2149 if tr:
2165 2150 tr.release()
2166 2151 lock.release()
2167 2152
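A minimal sketch of driving commitctx() with an in-memory context; the file
name, contents and user are illustrative, and it assumes the memctx and
memfilectx signatures of this era of mercurial.context:

    from mercurial import context
    from mercurial.node import nullid

    def filectxfn(repo, memctx, path):
        # return the new contents for each path listed in ``files``
        return context.memfilectx(repo, memctx, path, b'contents\n')

    mctx = context.memctx(repo, (repo[b'.'].node(), nullid),
                          b'example message', [b'a.txt'], filectxfn,
                          user=b'someone <someone@example.com>')
    node = repo.commitctx(mctx)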
2168 2153 @unfilteredmethod
2169 2154 def destroying(self):
2170 2155 '''Inform the repository that nodes are about to be destroyed.
2171 2156 Intended for use by strip and rollback, so there's a common
2172 2157 place for anything that has to be done before destroying history.
2173 2158
2174 2159 This is mostly useful for saving state that is in memory and waiting
2175 2160 to be flushed when the current lock is released. Because a call to
2176 2161 destroyed is imminent, the repo will be invalidated causing those
2177 2162 changes to stay in memory (waiting for the next unlock), or vanish
2178 2163 completely.
2179 2164 '''
2180 2165 # When using the same lock to commit and strip, the phasecache is left
2181 2166 # dirty after committing. Then when we strip, the repo is invalidated,
2182 2167 # causing those changes to disappear.
2183 2168 if '_phasecache' in vars(self):
2184 2169 self._phasecache.write()
2185 2170
2186 2171 @unfilteredmethod
2187 2172 def destroyed(self):
2188 2173 '''Inform the repository that nodes have been destroyed.
2189 2174 Intended for use by strip and rollback, so there's a common
2190 2175 place for anything that has to be done after destroying history.
2191 2176 '''
2192 2177 # When one tries to:
2193 2178 # 1) destroy nodes thus calling this method (e.g. strip)
2194 2179 # 2) use phasecache somewhere (e.g. commit)
2195 2180 #
2196 2181 # then 2) will fail because the phasecache contains nodes that were
2197 2182 # removed. We can either remove phasecache from the filecache,
2198 2183 # causing it to reload next time it is accessed, or simply filter
2199 2184 # the removed nodes now and write the updated cache.
2200 2185 self._phasecache.filterunknown(self)
2201 2186 self._phasecache.write()
2202 2187
2203 2188 # refresh all repository caches
2204 2189 self.updatecaches()
2205 2190
2206 2191 # Ensure the persistent tag cache is updated. Doing it now
2207 2192 # means that the tag cache only has to worry about destroyed
2208 2193 # heads immediately after a strip/rollback. That in turn
2209 2194 # guarantees that "cachetip == currenttip" (comparing both rev
2210 2195 # and node) always means no nodes have been added or destroyed.
2211 2196
2212 2197 # XXX this is suboptimal when qrefresh'ing: we strip the current
2213 2198 # head, refresh the tag cache, then immediately add a new head.
2214 2199 # But I think doing it this way is necessary for the "instant
2215 2200 # tag cache retrieval" case to work.
2216 2201 self.invalidate()
2217 2202
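destroying() and destroyed() bracket history destruction; a strip-like
caller would use them roughly as follows (the actual revlog truncation is
elided):

    with repo.lock():
        repo.destroying()
        # ... strip nodes / truncate revlogs here ...
        repo.destroyed()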
2218 2203 def status(self, node1='.', node2=None, match=None,
2219 2204 ignored=False, clean=False, unknown=False,
2220 2205 listsubrepos=False):
2221 2206 '''a convenience method that calls node1.status(node2)'''
2222 2207 return self[node1].status(node2, match, ignored, clean, unknown,
2223 2208 listsubrepos)
2224 2209
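For example, working directory status against its first parent; the result
is a status tuple with modified/added/removed and similar attributes:

    st = repo.status()
    for f in st.modified:
        repo.ui.write(b'M %s\n' % f)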
2225 2210 def addpostdsstatus(self, ps):
2226 2211 """Add a callback to run within the wlock, at the point at which status
2227 2212 fixups happen.
2228 2213
2229 2214 On status completion, callback(wctx, status) will be called with the
2230 2215 wlock held, unless the dirstate has changed from underneath or the wlock
2231 2216 couldn't be grabbed.
2232 2217
2233 2218 Callbacks should not capture and use a cached copy of the dirstate --
2234 2219 it might change in the meanwhile. Instead, they should access the
2235 2220 dirstate via wctx.repo().dirstate.
2236 2221
2237 2222 This list is emptied out after each status run -- extensions should
2238 2223 make sure they add to this list each time dirstate.status is called.
2239 2224 Extensions should also make sure they don't call this for statuses
2240 2225 that don't involve the dirstate.
2241 2226 """
2242 2227
2243 2228 # The list is located here for uniqueness reasons -- it is actually
2244 2229 # managed by the workingctx, but that isn't unique per-repo.
2245 2230 self._postdsstatus.append(ps)
2246 2231
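A sketch of registering such a fixup from an extension (the callback name
is hypothetical):

    def fixup(wctx, status):
        # called with wlock held; read the dirstate via wctx.repo().dirstate,
        # never via a copy captured earlier
        pass

    repo.addpostdsstatus(fixup)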
2247 2232 def postdsstatus(self):
2248 2233 """Used by workingctx to get the list of post-dirstate-status hooks."""
2249 2234 return self._postdsstatus
2250 2235
2251 2236 def clearpostdsstatus(self):
2252 2237 """Used by workingctx to clear post-dirstate-status hooks."""
2253 2238 del self._postdsstatus[:]
2254 2239
2255 2240 def heads(self, start=None):
2256 2241 if start is None:
2257 2242 cl = self.changelog
2258 2243 headrevs = reversed(cl.headrevs())
2259 2244 return [cl.node(rev) for rev in headrevs]
2260 2245
2261 2246 heads = self.changelog.heads(start)
2262 2247 # sort the output in rev descending order
2263 2248 return sorted(heads, key=self.changelog.rev, reverse=True)
2264 2249
2265 2250 def branchheads(self, branch=None, start=None, closed=False):
2266 2251 '''return a (possibly filtered) list of heads for the given branch
2267 2252
2268 2253 Heads are returned in topological order, from newest to oldest.
2269 2254 If branch is None, use the dirstate branch.
2270 2255 If start is not None, return only heads reachable from start.
2271 2256 If closed is True, return heads that are marked as closed as well.
2272 2257 '''
2273 2258 if branch is None:
2274 2259 branch = self[None].branch()
2275 2260 branches = self.branchmap()
2276 2261 if branch not in branches:
2277 2262 return []
2278 2263 # the cache returns heads ordered lowest to highest
2279 2264 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2280 2265 if start is not None:
2281 2266 # filter out the heads that cannot be reached from startrev
2282 2267 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2283 2268 bheads = [h for h in bheads if h in fbheads]
2284 2269 return bheads
2285 2270
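For instance, listing the open and closed heads of the "default" branch,
newest first:

    for h in repo.branchheads(b'default', closed=True):
        repo.ui.write(b'%s\n' % repo[h].hex())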
2286 2271 def branches(self, nodes):
2287 2272 if not nodes:
2288 2273 nodes = [self.changelog.tip()]
2289 2274 b = []
2290 2275 for n in nodes:
2291 2276 t = n
2292 2277 while True:
2293 2278 p = self.changelog.parents(n)
2294 2279 if p[1] != nullid or p[0] == nullid:
2295 2280 b.append((t, n, p[0], p[1]))
2296 2281 break
2297 2282 n = p[0]
2298 2283 return b
2299 2284
2300 2285 def between(self, pairs):
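# For each (top, bottom) pair, walk first parents from top towards bottom,
# sampling nodes at exponentially growing distances (1, 2, 4, 8, ...);
# used by the legacy "between" wire protocol command.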
2301 2286 r = []
2302 2287
2303 2288 for top, bottom in pairs:
2304 2289 n, l, i = top, [], 0
2305 2290 f = 1
2306 2291
2307 2292 while n != bottom and n != nullid:
2308 2293 p = self.changelog.parents(n)[0]
2309 2294 if i == f:
2310 2295 l.append(n)
2311 2296 f = f * 2
2312 2297 n = p
2313 2298 i += 1
2314 2299
2315 2300 r.append(l)
2316 2301
2317 2302 return r
2318 2303
2319 2304 def checkpush(self, pushop):
2320 2305 """Extensions can override this function if additional checks have
2321 2306 to be performed before pushing, or call it if they override push
2322 2307 command.
2323 2308 """
2324 2309
2325 2310 @unfilteredpropertycache
2326 2311 def prepushoutgoinghooks(self):
2327 2312 """Return util.hooks consists of a pushop with repo, remote, outgoing
2328 2313 methods, which are called before pushing changesets.
2329 2314 """
2330 2315 return util.hooks()
2331 2316
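An extension might add an outgoing check roughly like this (the source
name 'myext' is illustrative):

    def checkoutgoing(pushop):
        # pushop carries repo, remote and outgoing; raising error.Abort
        # here cancels the push
        pass

    repo.prepushoutgoinghooks.add(b'myext', checkoutgoing)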
2332 2317 def pushkey(self, namespace, key, old, new):
2333 2318 try:
2334 2319 tr = self.currenttransaction()
2335 2320 hookargs = {}
2336 2321 if tr is not None:
2337 2322 hookargs.update(tr.hookargs)
2338 2323 hookargs = pycompat.strkwargs(hookargs)
2339 2324 hookargs[r'namespace'] = namespace
2340 2325 hookargs[r'key'] = key
2341 2326 hookargs[r'old'] = old
2342 2327 hookargs[r'new'] = new
2343 2328 self.hook('prepushkey', throw=True, **hookargs)
2344 2329 except error.HookAbort as exc:
2345 2330 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2346 2331 if exc.hint:
2347 2332 self.ui.write_err(_("(%s)\n") % exc.hint)
2348 2333 return False
2349 2334 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2350 2335 ret = pushkey.push(self, namespace, key, old, new)
2351 2336 def runhook():
2352 2337 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2353 2338 ret=ret)
2354 2339 self._afterlock(runhook)
2355 2340 return ret
2356 2341
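Bookmarks, for example, travel over this interface; a sketch, where old is
empty because the key is new:

    ok = repo.pushkey(b'bookmarks', b'feature-bookmark', b'',
                      repo[b'tip'].hex())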
2357 2342 def listkeys(self, namespace):
2358 2343 self.hook('prelistkeys', throw=True, namespace=namespace)
2359 2344 self.ui.debug('listing keys for "%s"\n' % namespace)
2360 2345 values = pushkey.list(self, namespace)
2361 2346 self.hook('listkeys', namespace=namespace, values=values)
2362 2347 return values
2363 2348
2364 2349 def debugwireargs(self, one, two, three=None, four=None, five=None):
2365 2350 '''used to test argument passing over the wire'''
2366 2351 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2367 2352 pycompat.bytestr(four),
2368 2353 pycompat.bytestr(five))
2369 2354
2370 2355 def savecommitmessage(self, text):
2371 2356 fp = self.vfs('last-message.txt', 'wb')
2372 2357 try:
2373 2358 fp.write(text)
2374 2359 finally:
2375 2360 fp.close()
2376 2361 return self.pathto(fp.name[len(self.root) + 1:])
2377 2362
2378 2363 # used to avoid circular references so destructors work
2379 2364 def aftertrans(files):
2380 2365 renamefiles = [tuple(t) for t in files]
2381 2366 def a():
2382 2367 for vfs, src, dest in renamefiles:
2383 2368 # if src and dest refer to the same file, vfs.rename is a no-op,
2384 2369 # leaving both src and dest on disk. delete dest to make sure
2385 2370 # the rename can't be such a no-op.
2386 2371 vfs.tryunlink(dest)
2387 2372 try:
2388 2373 vfs.rename(src, dest)
2389 2374 except OSError: # journal file does not yet exist
2390 2375 pass
2391 2376 return a
2392 2377
2393 2378 def undoname(fn):
2394 2379 base, name = os.path.split(fn)
2395 2380 assert name.startswith('journal')
2396 2381 return os.path.join(base, name.replace('journal', 'undo', 1))
2397 2382
2398 2383 def instance(ui, path, create, intents=None):
2399 return localrepository(ui, util.urllocalpath(path), create,
2400 intents=intents)
2384 if create:
2385 vfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2386
2387 if vfs.exists('.hg'):
2388 raise error.RepoError(_('repository %s already exists') % path)
2389
2390 createrepository(ui, vfs)
2391
2392 return localrepository(ui, util.urllocalpath(path), intents=intents)
2401 2393
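With this change, creation goes through instance() instead of
localrepository(..., create=True), as the updated test below shows; a
sketch:

    from mercurial import localrepo, ui as uimod

    repo = localrepo.instance(uimod.ui.load(), b'/path/to/repo', create=True)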
2402 2394 def islocal(path):
2403 2395 return True
2404 2396
2405 2397 def newreporequirements(ui):
2406 2398 """Determine the set of requirements for a new local repository.
2407 2399
2408 2400 Extensions can wrap this function to specify custom requirements for
2409 2401 new repositories.
2410 2402 """
2411 2403 requirements = {'revlogv1'}
2412 2404 if ui.configbool('format', 'usestore'):
2413 2405 requirements.add('store')
2414 2406 if ui.configbool('format', 'usefncache'):
2415 2407 requirements.add('fncache')
2416 2408 if ui.configbool('format', 'dotencode'):
2417 2409 requirements.add('dotencode')
2418 2410
2419 2411 compengine = ui.config('experimental', 'format.compression')
2420 2412 if compengine not in util.compengines:
2421 2413 raise error.Abort(_('compression engine %s defined by '
2422 2414 'experimental.format.compression not available') %
2423 2415 compengine,
2424 2416 hint=_('run "hg debuginstall" to list available '
2425 2417 'compression engines'))
2426 2418
2427 2419 # zlib is the historical default and doesn't need an explicit requirement.
2428 2420 if compengine != 'zlib':
2429 2421 requirements.add('exp-compression-%s' % compengine)
2430 2422
2431 2423 if scmutil.gdinitconfig(ui):
2432 2424 requirements.add('generaldelta')
2433 2425 if ui.configbool('experimental', 'treemanifest'):
2434 2426 requirements.add('treemanifest')
2435 2427 # experimental config: format.sparse-revlog
2436 2428 if ui.configbool('format', 'sparse-revlog'):
2437 2429 requirements.add(SPARSEREVLOG_REQUIREMENT)
2438 2430
2439 2431 revlogv2 = ui.config('experimental', 'revlogv2')
2440 2432 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2441 2433 requirements.remove('revlogv1')
2442 2434 # generaldelta is implied by revlogv2.
2443 2435 requirements.discard('generaldelta')
2444 2436 requirements.add(REVLOGV2_REQUIREMENT)
2445 2437 # experimental config: format.internal-phase
2446 2438 if ui.configbool('format', 'internal-phase'):
2447 2439 requirements.add('internal-phase')
2448 2440
2449 2441 return requirements
2442
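Under stock defaults (store, fncache and dotencode enabled, zlib
compression, generaldelta on), the returned set would look roughly like:

    {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}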
2443 def createrepository(ui, wdirvfs):
2444 """Create a new repository in a vfs.
2445
2446 ``wdirvfs`` is a vfs instance pointing at the working directory.
2447 The set of requirements is derived from ``ui`` via newreporequirements().
2448 """
2449 requirements = newreporequirements(ui)
2450
2451 if not wdirvfs.exists():
2452 wdirvfs.makedirs()
2453
2454 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2455 hgvfs.makedir(notindexed=True)
2456
2457 if b'store' in requirements:
2458 hgvfs.mkdir(b'store')
2459
2460 # We create an invalid changelog outside the store so very old
2461 # Mercurial versions (which didn't know about the requirements
2462 # file) encounter an error on reading the changelog. This
2463 # effectively locks out old clients and prevents them from
2464 # mucking with a repo in an unknown format.
2465 #
2466 # The revlog header has version 2, which won't be recognized by
2467 # such old clients.
2468 hgvfs.append(b'00changelog.i',
2469 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2470 b'layout')
2471
2472 scmutil.writerequires(hgvfs, requirements)
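Mirroring what instance() does above when create is set; a sketch:

    from mercurial import vfs as vfsmod

    vfs = vfsmod.vfs(b'/path/to/newrepo', expandpath=True, realpath=True)
    if not vfs.exists(b'.hg'):
        createrepository(ui, vfs)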
@@ -1,47 +1,47 b''
1 1 #!/usr/bin/env python
2 2 from __future__ import absolute_import, print_function
3 3
4 4 import sys
5 5
6 6 from mercurial import (
7 7 commands,
8 8 localrepo,
9 9 ui as uimod,
10 10 )
11 11
12 12 print_ = print
13 13 def print(*args, **kwargs):
14 14 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
15 15
16 16 We could also just write directly to sys.stdout.buffer the way the
17 17 ui object will, but this was easier for porting the test.
18 18 """
19 19 print_(*args, **kwargs)
20 20 sys.stdout.flush()
21 21
22 22 u = uimod.ui.load()
23 23
24 24 print('% creating repo')
25 repo = localrepo.localrepository(u, b'.', create=True)
25 repo = localrepo.instance(u, b'.', create=True)
26 26
27 27 f = open('test.py', 'w')
28 28 try:
29 29 f.write('foo\n')
30 30 finally:
31 31 f.close()
32 32
33 33 print('% add and commit')
34 34 commands.add(u, repo, b'test.py')
35 35 commands.commit(u, repo, message=b'*')
36 36 commands.status(u, repo, clean=True)
37 37
38 38
39 39 print('% change')
40 40 f = open('test.py', 'w')
41 41 try:
42 42 f.write('bar\n')
43 43 finally:
44 44 f.close()
45 45
46 46 # this would return clean instead of changed before the fix
47 47 commands.status(u, repo, clean=True, modified=True)