lock: use configint for 'ui.timeout' config...
Boris Feld
r35208:d210723b default
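The change in this revision replaces a manual int() conversion of ui.config with ui.configint, which falls back to the registered default and flags non-integer values as configuration errors instead of raising a bare ValueError. A minimal sketch of the difference, using a toy Ui class rather than Mercurial's real ui implementation:

# Toy illustration only; not Mercurial's actual ui class.
class Ui(object):
    def __init__(self, values, defaults):
        self._values = values      # section -> {name: raw string}
        self._defaults = defaults  # section -> {name: default value}

    def config(self, section, name):
        # returns the raw string, or None when the key is unset
        return self._values.get(section, {}).get(name)

    def configint(self, section, name):
        raw = self.config(section, name)
        if raw is None:
            return self._defaults.get(section, {}).get(name)
        try:
            return int(raw)
        except ValueError:
            raise ValueError('config error: %s.%s is not an integer (%r)'
                             % (section, name, raw))

ui = Ui({'ui': {}}, {'ui': {'timeout': 600}})
assert ui.configint('ui', 'timeout') == 600  # default applies when unset
# int(ui.config('ui', 'timeout')) would raise TypeError on None instead.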
@@ -1,516 +1,516
1 1 # journal.py
2 2 #
3 3 # Copyright 2014-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """track previous positions of bookmarks (EXPERIMENTAL)
8 8
9 9 This extension adds a new command: `hg journal`, which shows you where
10 10 bookmarks were previously located.
11 11
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import errno
18 18 import os
19 19 import weakref
20 20
21 21 from mercurial.i18n import _
22 22
23 23 from mercurial import (
24 24 bookmarks,
25 25 cmdutil,
26 26 dispatch,
27 27 error,
28 28 extensions,
29 29 hg,
30 30 localrepo,
31 31 lock,
32 32 node,
33 33 pycompat,
34 34 registrar,
35 35 util,
36 36 )
37 37
38 38 from . import share
39 39
40 40 cmdtable = {}
41 41 command = registrar.command(cmdtable)
42 42
43 43 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
44 44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
45 45 # be specifying the version(s) of Mercurial they are tested with, or
46 46 # leave the attribute unspecified.
47 47 testedwith = 'ships-with-hg-core'
48 48
49 49 # storage format version; increment when the format changes
50 50 storageversion = 0
51 51
52 52 # namespaces
53 53 bookmarktype = 'bookmark'
54 54 wdirparenttype = 'wdirparent'
55 55 # In a shared repository, what shared feature name is used
56 56 # to indicate this namespace is shared with the source?
57 57 sharednamespaces = {
58 58 bookmarktype: hg.sharedbookmarks,
59 59 }
60 60
61 61 # Journal recording, register hooks and storage object
62 62 def extsetup(ui):
63 63 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
64 64 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
65 65 extensions.wrapfilecache(
66 66 localrepo.localrepository, 'dirstate', wrapdirstate)
67 67 extensions.wrapfunction(hg, 'postshare', wrappostshare)
68 68 extensions.wrapfunction(hg, 'copystore', unsharejournal)
69 69
70 70 def reposetup(ui, repo):
71 71 if repo.local():
72 72 repo.journal = journalstorage(repo)
73 73 repo._wlockfreeprefix.add('namejournal')
74 74
75 75 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
76 76 if cached:
77 77 # an already instantiated dirstate isn't yet marked as
78 78 # "journal"-ing, even though repo.dirstate() was already
79 79 # wrapped by our own wrapdirstate()
80 80 _setupdirstate(repo, dirstate)
81 81
82 82 def runcommand(orig, lui, repo, cmd, fullargs, *args):
83 83 """Track the command line options for recording in the journal"""
84 84 journalstorage.recordcommand(*fullargs)
85 85 return orig(lui, repo, cmd, fullargs, *args)
86 86
87 87 def _setupdirstate(repo, dirstate):
88 88 dirstate.journalstorage = repo.journal
89 89 dirstate.addparentchangecallback('journal', recorddirstateparents)
90 90
91 91 # hooks to record dirstate changes
92 92 def wrapdirstate(orig, repo):
93 93 """Make journal storage available to the dirstate object"""
94 94 dirstate = orig(repo)
95 95 if util.safehasattr(repo, 'journal'):
96 96 _setupdirstate(repo, dirstate)
97 97 return dirstate
98 98
99 99 def recorddirstateparents(dirstate, old, new):
100 100 """Records all dirstate parent changes in the journal."""
101 101 old = list(old)
102 102 new = list(new)
103 103 if util.safehasattr(dirstate, 'journalstorage'):
104 104 # only record two hashes if there was a merge
105 105 oldhashes = old[:1] if old[1] == node.nullid else old
106 106 newhashes = new[:1] if new[1] == node.nullid else new
107 107 dirstate.journalstorage.record(
108 108 wdirparenttype, '.', oldhashes, newhashes)
109 109
110 110 # hooks to record bookmark changes (both local and remote)
111 111 def recordbookmarks(orig, store, fp):
112 112 """Records all bookmark changes in the journal."""
113 113 repo = store._repo
114 114 if util.safehasattr(repo, 'journal'):
115 115 oldmarks = bookmarks.bmstore(repo)
116 116 for mark, value in store.iteritems():
117 117 oldvalue = oldmarks.get(mark, node.nullid)
118 118 if value != oldvalue:
119 119 repo.journal.record(bookmarktype, mark, oldvalue, value)
120 120 return orig(store, fp)
121 121
122 122 # shared repository support
123 123 def _readsharedfeatures(repo):
124 124 """A set of shared features for this repository"""
125 125 try:
126 126 return set(repo.vfs.read('shared').splitlines())
127 127 except IOError as inst:
128 128 if inst.errno != errno.ENOENT:
129 129 raise
130 130 return set()
131 131
132 132 def _mergeentriesiter(*iterables, **kwargs):
133 133 """Given a set of sorted iterables, yield the next entry in merged order
134 134
135 135 Note that by default entries go from most recent to oldest.
136 136 """
137 137 order = kwargs.pop(r'order', max)
138 138 iterables = [iter(it) for it in iterables]
139 139 # this tracks still active iterables; iterables are deleted as they are
140 140 # exhausted, which is why this is a dictionary and why each entry also
141 141 # stores the key. Entries are mutable so we can store the next value each
142 142 # time.
143 143 iterable_map = {}
144 144 for key, it in enumerate(iterables):
145 145 try:
146 146 iterable_map[key] = [next(it), key, it]
147 147 except StopIteration:
148 148 # empty entry, can be ignored
149 149 pass
150 150
151 151 while iterable_map:
152 152 value, key, it = order(iterable_map.itervalues())
153 153 yield value
154 154 try:
155 155 iterable_map[key][0] = next(it)
156 156 except StopIteration:
157 157 # this iterable is empty, remove it from consideration
158 158 del iterable_map[key]
159 159
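To see what _mergeentriesiter does, here is a self-contained Python 3 sketch of the same merge over plain integers; the real code compares journalentry tuples, which sort by timestamp first:

# Illustrative reimplementation; mirrors the loop above.
def mergeiter(*iterables, **kwargs):
    order = kwargs.pop('order', max)
    iterable_map = {}
    for key, it in enumerate(iter(i) for i in iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            pass  # empty input, ignore
    while iterable_map:
        value, key, it = order(iterable_map.values())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            del iterable_map[key]  # exhausted

# two newest-first sequences merge into one newest-first sequence
assert list(mergeiter([9, 5, 1], [8, 7, 2])) == [9, 8, 7, 5, 2, 1]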
160 160 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
161 161 """Mark this shared working copy as sharing journal information"""
162 162 with destrepo.wlock():
163 163 orig(sourcerepo, destrepo, **kwargs)
164 164 with destrepo.vfs('shared', 'a') as fp:
165 165 fp.write('journal\n')
166 166
167 167 def unsharejournal(orig, ui, repo, repopath):
168 168 """Copy shared journal entries into this repo when unsharing"""
169 169 if (repo.path == repopath and repo.shared() and
170 170 util.safehasattr(repo, 'journal')):
171 171 sharedrepo = share._getsrcrepo(repo)
172 172 sharedfeatures = _readsharedfeatures(repo)
173 173 if sharedrepo and sharedfeatures > {'journal'}:
174 174 # there is a shared repository and there are shared journal entries
175 175 # to copy. Move shared data over from source to destination but
176 176 # move the local file first
177 177 if repo.vfs.exists('namejournal'):
178 178 journalpath = repo.vfs.join('namejournal')
179 179 util.rename(journalpath, journalpath + '.bak')
180 180 storage = repo.journal
181 181 local = storage._open(
182 182 repo.vfs, filename='namejournal.bak', _newestfirst=False)
183 183 shared = (
184 184 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
185 185 if sharednamespaces.get(e.namespace) in sharedfeatures)
186 186 for entry in _mergeentriesiter(local, shared, order=min):
187 187 storage._write(repo.vfs, entry)
188 188
189 189 return orig(ui, repo, repopath)
190 190
191 191 class journalentry(collections.namedtuple(
192 192 u'journalentry',
193 193 u'timestamp user command namespace name oldhashes newhashes')):
194 194 """Individual journal entry
195 195
196 196 * timestamp: a mercurial (time, timezone) tuple
197 197 * user: the username that ran the command
198 198 * command: the hg command that triggered this record
199 199 * namespace: the entry namespace, an opaque string
200 200 * name: the name of the changed item, an opaque string with meaning in
201 201 the namespace
202 202 * oldhashes: a tuple of one or more binary hashes for the old location
203 203 * newhashes: a tuple of one or more binary hashes for the new location
204 204
205 205 Handles serialisation from and to the storage format. Fields are
206 206 separated by newlines, hashes are written out in hex separated by commas,
207 207 timestamp and timezone are separated by a space.
208 208
209 209 """
210 210 @classmethod
211 211 def fromstorage(cls, line):
212 212 (time, user, command, namespace, name,
213 213 oldhashes, newhashes) = line.split('\n')
214 214 timestamp, tz = time.split()
215 215 timestamp, tz = float(timestamp), int(tz)
216 216 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
217 217 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
218 218 return cls(
219 219 (timestamp, tz), user, command, namespace, name,
220 220 oldhashes, newhashes)
221 221
222 222 def __str__(self):
223 223 """String representation for storage"""
224 224 time = ' '.join(map(str, self.timestamp))
225 225 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
226 226 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
227 227 return '\n'.join((
228 228 time, self.user, self.command, self.namespace, self.name,
229 229 oldhashes, newhashes))
230 230
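The storage line format described in the docstring can be exercised standalone; the user, command, and 20-byte hashes below are fabricated for illustration:

import binascii, collections

Entry = collections.namedtuple(
    'Entry', 'timestamp user command namespace name oldhashes newhashes')

def tostorage(e):
    # mirrors __str__ above: newline-separated fields, hex hashes
    # joined by commas, timestamp and timezone joined by a space
    time = ' '.join(map(str, e.timestamp))
    old = ','.join(binascii.hexlify(h).decode('ascii') for h in e.oldhashes)
    new = ','.join(binascii.hexlify(h).decode('ascii') for h in e.newhashes)
    return '\n'.join((time, e.user, e.command, e.namespace, e.name, old, new))

e = Entry((1510000000.0, 0), 'alice', 'hg up feature', 'bookmark',
          'feature', (b'\x00' * 20,), (b'\x11' * 20,))
assert len(tostorage(e).split('\n')) == 7  # seven fields, as documented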
231 231 class journalstorage(object):
232 232 """Storage for journal entries
233 233
234 234 Entries are divided over two files; one with entries that pertain to the
235 235 local working copy *only*, and one with entries that are shared across
236 236 multiple working copies when shared using the share extension.
237 237
238 238 Entries are stored with NUL bytes as separators. See the journalentry
239 239 class for the per-entry structure.
240 240
241 241 The file format starts with an integer version, delimited by a NUL.
242 242
243 243 This storage uses a dedicated lock; this makes it easier to avoid issues
244 244 with entries that are added while the regular wlock is not held
245 245 (e.g. by the dirstate).
246 246
247 247 """
248 248 _currentcommand = ()
249 249 _lockref = None
250 250
251 251 def __init__(self, repo):
252 252 self.user = util.getuser()
253 253 self.ui = repo.ui
254 254 self.vfs = repo.vfs
255 255
256 256 # is this working copy using a shared storage?
257 257 self.sharedfeatures = self.sharedvfs = None
258 258 if repo.shared():
259 259 features = _readsharedfeatures(repo)
260 260 sharedrepo = share._getsrcrepo(repo)
261 261 if sharedrepo is not None and 'journal' in features:
262 262 self.sharedvfs = sharedrepo.vfs
263 263 self.sharedfeatures = features
264 264
265 265 # track the current command for recording in journal entries
266 266 @property
267 267 def command(self):
268 268 commandstr = ' '.join(
269 269 map(util.shellquote, journalstorage._currentcommand))
270 270 if '\n' in commandstr:
271 271 # truncate multi-line commands
272 272 commandstr = commandstr.partition('\n')[0] + ' ...'
273 273 return commandstr
274 274
275 275 @classmethod
276 276 def recordcommand(cls, *fullargs):
277 277 """Set the current hg arguments, stored with recorded entries"""
278 278 # Set the current command on the class because we may have started
279 279 # with a non-local repo (cloning for example).
280 280 cls._currentcommand = fullargs
281 281
282 282 def _currentlock(self, lockref):
283 283 """Returns the lock if it's held, or None if it's not.
284 284
285 285 (This is copied from the localrepo class)
286 286 """
287 287 if lockref is None:
288 288 return None
289 289 l = lockref()
290 290 if l is None or not l.held:
291 291 return None
292 292 return l
293 293
294 294 def jlock(self, vfs):
295 295 """Create a lock for the journal file"""
296 296 if self._currentlock(self._lockref) is not None:
297 297 raise error.Abort(_('journal lock does not support nesting'))
298 298 desc = _('journal of %s') % vfs.base
299 299 try:
300 300 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
301 301 except error.LockHeld as inst:
302 302 self.ui.warn(
303 303 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
304 304 # default to 600 seconds timeout
305 305 l = lock.lock(
306 306 vfs, 'namejournal.lock',
307 int(self.ui.config("ui", "timeout")), desc=desc)
307 self.ui.configint("ui", "timeout"), desc=desc)
308 308 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
309 309 self._lockref = weakref.ref(l)
310 310 return l
311 311
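The acquisition pattern in jlock (try without waiting, tell the user who holds the lock, then wait up to the configured timeout) reduced to a toy; threading.Lock stands in for Mercurial's file lock and the names are hypothetical (Python 3):

import threading, time

def acquire_with_notice(lk, timeout, warn):
    if lk.acquire(blocking=False):
        return lk  # fast path: nobody held the lock
    warn('waiting for lock (timeout %ds)' % timeout)
    start = time.time()
    if not lk.acquire(timeout=timeout):
        raise RuntimeError('timed out waiting for lock')
    warn('got lock after %.1f seconds' % (time.time() - start))
    return lk

lk = threading.Lock()
acquire_with_notice(lk, 600, print)  # acquires immediately here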
312 312 def record(self, namespace, name, oldhashes, newhashes):
313 313 """Record a new journal entry
314 314
315 315 * namespace: an opaque string; this can be used to filter on the type
316 316 of recorded entries.
317 317 * name: the name defining this entry; for bookmarks, this is the
318 318 bookmark name. Can be filtered on when retrieving entries.
319 319 * oldhashes and newhashes: each a single binary hash, or a list of
320 320 binary hashes. These represent the old and new position of the named
321 321 item.
322 322
323 323 """
324 324 if not isinstance(oldhashes, list):
325 325 oldhashes = [oldhashes]
326 326 if not isinstance(newhashes, list):
327 327 newhashes = [newhashes]
328 328
329 329 entry = journalentry(
330 330 util.makedate(), self.user, self.command, namespace, name,
331 331 oldhashes, newhashes)
332 332
333 333 vfs = self.vfs
334 334 if self.sharedvfs is not None:
335 335 # write to the shared repository if this feature is being
336 336 # shared between working copies.
337 337 if sharednamespaces.get(namespace) in self.sharedfeatures:
338 338 vfs = self.sharedvfs
339 339
340 340 self._write(vfs, entry)
341 341
342 342 def _write(self, vfs, entry):
343 343 with self.jlock(vfs):
344 344 version = None
345 345 # open file in append mode to ensure it is created if missing
346 346 with vfs('namejournal', mode='a+b') as f:
347 347 f.seek(0, os.SEEK_SET)
348 348 # Read just enough bytes to get a version number (up to 2
349 349 # digits plus separator)
350 350 version = f.read(3).partition('\0')[0]
351 351 if version and version != str(storageversion):
352 352 # different version of the storage. Exit early (and not
353 353 # write anything) if this is not a version we can handle or
354 354 # the file is corrupt. In future, perhaps rotate the file
355 355 # instead?
356 356 self.ui.warn(
357 357 _("unsupported journal file version '%s'\n") % version)
358 358 return
359 359 if not version:
360 360 # empty file, write version first
361 361 f.write(str(storageversion) + '\0')
362 362 f.seek(0, os.SEEK_END)
363 363 f.write(str(entry) + '\0')
364 364
365 365 def filtered(self, namespace=None, name=None):
366 366 """Yield all journal entries with the given namespace or name
367 367
368 368 Both the namespace and the name are optional; if neither is given all
369 369 entries in the journal are produced.
370 370
371 371 Matching supports regular expressions by using the `re:` prefix
372 372 (use `literal:` to match names or namespaces that start with `re:`)
373 373
374 374 """
375 375 if namespace is not None:
376 376 namespace = util.stringmatcher(namespace)[-1]
377 377 if name is not None:
378 378 name = util.stringmatcher(name)[-1]
379 379 for entry in self:
380 380 if namespace is not None and not namespace(entry.namespace):
381 381 continue
382 382 if name is not None and not name(entry.name):
383 383 continue
384 384 yield entry
385 385
386 386 def __iter__(self):
387 387 """Iterate over the storage
388 388
389 389 Yields journalentry instances for each contained journal record.
390 390
391 391 """
392 392 local = self._open(self.vfs)
393 393
394 394 if self.sharedvfs is None:
395 395 return local
396 396
397 397 # iterate over both local and shared entries, but only those
398 398 # shared entries that are among the currently shared features
399 399 shared = (
400 400 e for e in self._open(self.sharedvfs)
401 401 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
402 402 return _mergeentriesiter(local, shared)
403 403
404 404 def _open(self, vfs, filename='namejournal', _newestfirst=True):
405 405 if not vfs.exists(filename):
406 406 return
407 407
408 408 with vfs(filename) as f:
409 409 raw = f.read()
410 410
411 411 lines = raw.split('\0')
412 412 version = lines and lines[0]
413 413 if version != str(storageversion):
414 414 version = version or _('not available')
415 415 raise error.Abort(_("unknown journal file version '%s'") % version)
416 416
417 417 # Skip the first line; it's a version number. Normally we iterate over
418 418 # these in reverse order to list newest first; only when copying across
419 419 # a shared storage do we forgo reversing.
420 420 lines = lines[1:]
421 421 if _newestfirst:
422 422 lines = reversed(lines)
423 423 for line in lines:
424 424 if not line:
425 425 continue
426 426 yield journalentry.fromstorage(line)
427 427
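A small sketch of the on-disk layout _open parses: a version number, then NUL-separated records, oldest first on disk (hence the reversed() above). The record strings are fabricated:

storageversion = 0
raw = '0\0entry-one\0entry-two\0'  # as produced by _write above
lines = raw.split('\0')
assert lines[0] == str(storageversion)  # version comes first
records = [l for l in reversed(lines[1:]) if l]  # skip empty tail
assert records == ['entry-two', 'entry-one']  # newest first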
428 428 # journal reading
429 429 # log options that don't make sense for journal
430 430 _ignoreopts = ('no-merges', 'graph')
431 431 @command(
432 432 'journal', [
433 433 ('', 'all', None, 'show history for all names'),
434 434 ('c', 'commits', None, 'show commit metadata'),
435 435 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
436 436 '[OPTION]... [BOOKMARKNAME]')
437 437 def journal(ui, repo, *args, **opts):
438 438 """show the previous position of bookmarks and the working copy
439 439
440 440 The journal is used to see the previous commits that bookmarks and the
441 441 working copy pointed to. By default the previous locations of the working
442 442 copy are shown. Passing a bookmark name will show all the previous positions of
443 443 that bookmark. Use the --all switch to show previous locations for all
444 444 bookmarks and the working copy; each line will then include the bookmark
445 445 name, or '.' for the working copy, as well.
446 446
447 447 If `name` starts with `re:`, the remainder of the name is treated as
448 448 a regular expression. To match a name that actually starts with `re:`,
449 449 use the prefix `literal:`.
450 450
451 451 By default hg journal only shows the commit hash and the command that was
452 452 running at that time. -v/--verbose will show the prior hash, the user, and
453 453 the time at which it happened.
454 454
455 455 Use -c/--commits to output log information on each commit hash; at this
456 456 point you can use the usual `--patch`, `--git`, `--stat` and `--template`
457 457 switches to alter the log output for these.
458 458
459 459 `hg journal -T json` can be used to produce machine readable output.
460 460
461 461 """
462 462 opts = pycompat.byteskwargs(opts)
463 463 name = '.'
464 464 if opts.get('all'):
465 465 if args:
466 466 raise error.Abort(
467 467 _("You can't combine --all and filtering on a name"))
468 468 name = None
469 469 if args:
470 470 name = args[0]
471 471
472 472 fm = ui.formatter('journal', opts)
473 473
474 474 if opts.get("template") != "json":
475 475 if name is None:
476 476 displayname = _('the working copy and bookmarks')
477 477 else:
478 478 displayname = "'%s'" % name
479 479 ui.status(_("previous locations of %s:\n") % displayname)
480 480
481 481 limit = cmdutil.loglimit(opts)
482 482 entry = None
483 483 for count, entry in enumerate(repo.journal.filtered(name=name)):
484 484 if count == limit:
485 485 break
486 486 newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
487 487 name='node', sep=',')
488 488 oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
489 489 name='node', sep=',')
490 490
491 491 fm.startitem()
492 492 fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
493 493 fm.write('newhashes', '%s', newhashesstr)
494 494 fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
495 495 fm.condwrite(
496 496 opts.get('all') or name.startswith('re:'),
497 497 'name', ' %-8s', entry.name)
498 498
499 499 timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
500 500 fm.condwrite(ui.verbose, 'date', ' %s', timestring)
501 501 fm.write('command', ' %s\n', entry.command)
502 502
503 503 if opts.get("commits"):
504 504 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
505 505 for hash in entry.newhashes:
506 506 try:
507 507 ctx = repo[hash]
508 508 displayer.show(ctx)
509 509 except error.RepoLookupError as e:
510 510 fm.write('repolookuperror', "%s\n\n", str(e))
511 511 displayer.close()
512 512
513 513 fm.end()
514 514
515 515 if entry is None:
516 516 ui.status(_("no recorded locations\n"))
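Illustrative invocations of the command above (flags taken from the docstring; output depends on the repository):

hg journal                   # previous positions of the working copy
hg journal feature           # previous positions of bookmark 'feature'
hg journal --all --verbose   # all names, with user and timestamp
hg journal -T json           # machine-readable output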
@@ -1,2301 +1,2301
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepo,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
72 72 # set of (path, vfs-location) tuples. vfs-location is:
73 73 # - 'plain' for vfs relative paths
74 74 # - '' for svfs relative paths
75 75 _cachedfiles = set()
76 76
77 77 class _basefilecache(scmutil.filecache):
78 78 """All filecache usage on repo are done for logic that should be unfiltered
79 79 """
80 80 def __get__(self, repo, type=None):
81 81 if repo is None:
82 82 return self
83 83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 84 def __set__(self, repo, value):
85 85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 86 def __delete__(self, repo):
87 87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88 88
89 89 class repofilecache(_basefilecache):
90 90 """filecache for files in .hg but outside of .hg/store"""
91 91 def __init__(self, *paths):
92 92 super(repofilecache, self).__init__(*paths)
93 93 for path in paths:
94 94 _cachedfiles.add((path, 'plain'))
95 95
96 96 def join(self, obj, fname):
97 97 return obj.vfs.join(fname)
98 98
99 99 class storecache(_basefilecache):
100 100 """filecache for files in the store"""
101 101 def __init__(self, *paths):
102 102 super(storecache, self).__init__(*paths)
103 103 for path in paths:
104 104 _cachedfiles.add((path, ''))
105 105
106 106 def join(self, obj, fname):
107 107 return obj.sjoin(fname)
108 108
109 109 def isfilecached(repo, name):
110 110 """check if a repo has already cached "name" filecache-ed property
111 111
112 112 This returns (cachedobj-or-None, iscached) tuple.
113 113 """
114 114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 115 if not cacheentry:
116 116 return None, False
117 117 return cacheentry.obj, True
118 118
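The shape of the return value, illustrated with a stand-in cache (hypothetical entry objects; the real _filecache holds util.filecacheentry instances):

class _entry(object):
    def __init__(self, obj):
        self.obj = obj

def isfilecached_sketch(cache, name):
    # same two-tuple contract as isfilecached above
    entry = cache.get(name, None)
    if not entry:
        return None, False
    return entry.obj, True

cache = {'dirstate': _entry('DIRSTATE')}
assert isfilecached_sketch(cache, 'dirstate') == ('DIRSTATE', True)
assert isfilecached_sketch(cache, 'bookmarks') == (None, False)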
119 119 class unfilteredpropertycache(util.propertycache):
120 120 """propertycache that apply to unfiltered repo only"""
121 121
122 122 def __get__(self, repo, type=None):
123 123 unfi = repo.unfiltered()
124 124 if unfi is repo:
125 125 return super(unfilteredpropertycache, self).__get__(unfi)
126 126 return getattr(unfi, self.name)
127 127
128 128 class filteredpropertycache(util.propertycache):
129 129 """propertycache that must take filtering in account"""
130 130
131 131 def cachevalue(self, obj, value):
132 132 object.__setattr__(obj, self.name, value)
133 133
134 134
135 135 def hasunfilteredcache(repo, name):
136 136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 137 return name in vars(repo.unfiltered())
138 138
139 139 def unfilteredmethod(orig):
140 140 """decorate method that always need to be run on unfiltered version"""
141 141 def wrapper(repo, *args, **kwargs):
142 142 return orig(repo.unfiltered(), *args, **kwargs)
143 143 return wrapper
144 144
145 145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 146 'unbundle'}
147 147 legacycaps = moderncaps.union({'changegroupsubset'})
148 148
149 149 class localpeer(repository.peer):
150 150 '''peer for a local repo; reflects only the most recent API'''
151 151
152 152 def __init__(self, repo, caps=None):
153 153 super(localpeer, self).__init__()
154 154
155 155 if caps is None:
156 156 caps = moderncaps.copy()
157 157 self._repo = repo.filtered('served')
158 158 self._ui = repo.ui
159 159 self._caps = repo._restrictcapabilities(caps)
160 160
161 161 # Begin of _basepeer interface.
162 162
163 163 @util.propertycache
164 164 def ui(self):
165 165 return self._ui
166 166
167 167 def url(self):
168 168 return self._repo.url()
169 169
170 170 def local(self):
171 171 return self._repo
172 172
173 173 def peer(self):
174 174 return self
175 175
176 176 def canpush(self):
177 177 return True
178 178
179 179 def close(self):
180 180 self._repo.close()
181 181
182 182 # End of _basepeer interface.
183 183
184 184 # Begin of _basewirecommands interface.
185 185
186 186 def branchmap(self):
187 187 return self._repo.branchmap()
188 188
189 189 def capabilities(self):
190 190 return self._caps
191 191
192 192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 193 """Used to test argument passing over the wire"""
194 194 return "%s %s %s %s %s" % (one, two, three, four, five)
195 195
196 196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 197 **kwargs):
198 198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 199 common=common, bundlecaps=bundlecaps,
200 200 **kwargs)
201 201 cb = util.chunkbuffer(chunks)
202 202
203 203 if exchange.bundle2requested(bundlecaps):
204 204 # When requesting a bundle2, getbundle returns a stream to make the
205 205 # wire level function happier. We need to build a proper object
206 206 # from it in local peer.
207 207 return bundle2.getunbundler(self.ui, cb)
208 208 else:
209 209 return changegroup.getunbundler('01', cb, None)
210 210
211 211 def heads(self):
212 212 return self._repo.heads()
213 213
214 214 def known(self, nodes):
215 215 return self._repo.known(nodes)
216 216
217 217 def listkeys(self, namespace):
218 218 return self._repo.listkeys(namespace)
219 219
220 220 def lookup(self, key):
221 221 return self._repo.lookup(key)
222 222
223 223 def pushkey(self, namespace, key, old, new):
224 224 return self._repo.pushkey(namespace, key, old, new)
225 225
226 226 def stream_out(self):
227 227 raise error.Abort(_('cannot perform stream clone against local '
228 228 'peer'))
229 229
230 230 def unbundle(self, cg, heads, url):
231 231 """apply a bundle on a repo
232 232
233 233 This function handles the repo locking itself."""
234 234 try:
235 235 try:
236 236 cg = exchange.readbundle(self.ui, cg, None)
237 237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 238 if util.safehasattr(ret, 'getchunks'):
239 239 # This is a bundle20 object, turn it into an unbundler.
240 240 # This little dance should be dropped eventually when the
241 241 # API is finally improved.
242 242 stream = util.chunkbuffer(ret.getchunks())
243 243 ret = bundle2.getunbundler(self.ui, stream)
244 244 return ret
245 245 except Exception as exc:
246 246 # If the exception contains output salvaged from a bundle2
247 247 # reply, we need to make sure it is printed before continuing
248 248 # to fail. So we build a bundle2 with such output and consume
249 249 # it directly.
250 250 #
251 251 # This is not very elegant but allows a "simple" solution for
252 252 # issue4594
253 253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 254 if output:
255 255 bundler = bundle2.bundle20(self._repo.ui)
256 256 for out in output:
257 257 bundler.addpart(out)
258 258 stream = util.chunkbuffer(bundler.getchunks())
259 259 b = bundle2.getunbundler(self.ui, stream)
260 260 bundle2.processbundle(self._repo, b)
261 261 raise
262 262 except error.PushRaced as exc:
263 263 raise error.ResponseError(_('push failed:'), str(exc))
264 264
265 265 # End of _basewirecommands interface.
266 266
267 267 # Begin of peer interface.
268 268
269 269 def iterbatch(self):
270 270 return peer.localiterbatcher(self)
271 271
272 272 # End of peer interface.
273 273
274 274 class locallegacypeer(repository.legacypeer, localpeer):
275 275 '''peer extension which implements legacy methods too; used for tests with
276 276 restricted capabilities'''
277 277
278 278 def __init__(self, repo):
279 279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280 280
281 281 # Begin of baselegacywirecommands interface.
282 282
283 283 def between(self, pairs):
284 284 return self._repo.between(pairs)
285 285
286 286 def branches(self, nodes):
287 287 return self._repo.branches(nodes)
288 288
289 289 def changegroup(self, basenodes, source):
290 290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 291 missingheads=self._repo.heads())
292 292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293 293
294 294 def changegroupsubset(self, bases, heads, source):
295 295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 296 missingheads=heads)
297 297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298 298
299 299 # End of baselegacywirecommands interface.
300 300
301 301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 302 # clients.
303 303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304 304
305 305 class localrepository(object):
306 306
307 307 supportedformats = {
308 308 'revlogv1',
309 309 'generaldelta',
310 310 'treemanifest',
311 311 'manifestv2',
312 312 REVLOGV2_REQUIREMENT,
313 313 }
314 314 _basesupported = supportedformats | {
315 315 'store',
316 316 'fncache',
317 317 'shared',
318 318 'relshared',
319 319 'dotencode',
320 320 'exp-sparse',
321 321 }
322 322 openerreqs = {
323 323 'revlogv1',
324 324 'generaldelta',
325 325 'treemanifest',
326 326 'manifestv2',
327 327 }
328 328
329 329 # a list of (ui, featureset) functions.
330 330 # only functions defined in module of enabled extensions are invoked
331 331 featuresetupfuncs = set()
332 332
333 333 # list of prefixes for files which can be written without 'wlock'
334 334 # Extensions should extend this list when needed
335 335 _wlockfreeprefix = {
336 336 # We might consider requiring 'wlock' for the next
337 337 # two, but pretty much all the existing code assume
338 338 # wlock is not needed so we keep them excluded for
339 339 # now.
340 340 'hgrc',
341 341 'requires',
342 342 # XXX cache is a complicated business; someone
343 343 # should investigate this in depth at some point
344 344 'cache/',
345 345 # XXX shouldn't the dirstate be covered by the wlock?
346 346 'dirstate',
347 347 # XXX bisect was still a bit too messy at the time
348 348 # this changeset was introduced. Someone should fix
349 349 # the remaining bit and drop this line
350 350 'bisect.state',
351 351 }
352 352
353 353 def __init__(self, baseui, path, create=False):
354 354 self.requirements = set()
355 355 self.filtername = None
356 356 # wvfs: rooted at the repository root, used to access the working copy
357 357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
358 358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
359 359 self.vfs = None
360 360 # svfs: usually rooted at .hg/store, used to access repository history
361 361 # If this is a shared repository, this vfs may point to another
362 362 # repository's .hg/store directory.
363 363 self.svfs = None
364 364 self.root = self.wvfs.base
365 365 self.path = self.wvfs.join(".hg")
366 366 self.origroot = path
367 367 # This is only used by context.workingctx.match in order to
368 368 # detect files in subrepos.
369 369 self.auditor = pathutil.pathauditor(
370 370 self.root, callback=self._checknested)
371 371 # This is only used by context.basectx.match in order to detect
372 372 # files in subrepos.
373 373 self.nofsauditor = pathutil.pathauditor(
374 374 self.root, callback=self._checknested, realfs=False, cached=True)
375 375 self.baseui = baseui
376 376 self.ui = baseui.copy()
377 377 self.ui.copy = baseui.copy # prevent copying repo configuration
378 378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
379 379 if (self.ui.configbool('devel', 'all-warnings') or
380 380 self.ui.configbool('devel', 'check-locks')):
381 381 self.vfs.audit = self._getvfsward(self.vfs.audit)
382 382 # A list of callbacks to shape the phase if no data were found.
383 383 # Callbacks are in the form: func(repo, roots) --> processed root.
384 384 # This list is to be filled by extensions during repo setup
385 385 self._phasedefaults = []
386 386 try:
387 387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
388 388 self._loadextensions()
389 389 except IOError:
390 390 pass
391 391
392 392 if self.featuresetupfuncs:
393 393 self.supported = set(self._basesupported) # use private copy
394 394 extmods = set(m.__name__ for n, m
395 395 in extensions.extensions(self.ui))
396 396 for setupfunc in self.featuresetupfuncs:
397 397 if setupfunc.__module__ in extmods:
398 398 setupfunc(self.ui, self.supported)
399 399 else:
400 400 self.supported = self._basesupported
401 401 color.setup(self.ui)
402 402
403 403 # Add compression engines.
404 404 for name in util.compengines:
405 405 engine = util.compengines[name]
406 406 if engine.revlogheader():
407 407 self.supported.add('exp-compression-%s' % name)
408 408
409 409 if not self.vfs.isdir():
410 410 if create:
411 411 self.requirements = newreporequirements(self)
412 412
413 413 if not self.wvfs.exists():
414 414 self.wvfs.makedirs()
415 415 self.vfs.makedir(notindexed=True)
416 416
417 417 if 'store' in self.requirements:
418 418 self.vfs.mkdir("store")
419 419
420 420 # create an invalid changelog
421 421 self.vfs.append(
422 422 "00changelog.i",
423 423 '\0\0\0\2' # represents revlogv2
424 424 ' dummy changelog to prevent using the old repo layout'
425 425 )
426 426 else:
427 427 raise error.RepoError(_("repository %s not found") % path)
428 428 elif create:
429 429 raise error.RepoError(_("repository %s already exists") % path)
430 430 else:
431 431 try:
432 432 self.requirements = scmutil.readrequires(
433 433 self.vfs, self.supported)
434 434 except IOError as inst:
435 435 if inst.errno != errno.ENOENT:
436 436 raise
437 437
438 438 cachepath = self.vfs.join('cache')
439 439 self.sharedpath = self.path
440 440 try:
441 441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
442 442 if 'relshared' in self.requirements:
443 443 sharedpath = self.vfs.join(sharedpath)
444 444 vfs = vfsmod.vfs(sharedpath, realpath=True)
445 445 cachepath = vfs.join('cache')
446 446 s = vfs.base
447 447 if not vfs.exists():
448 448 raise error.RepoError(
449 449 _('.hg/sharedpath points to nonexistent directory %s') % s)
450 450 self.sharedpath = s
451 451 except IOError as inst:
452 452 if inst.errno != errno.ENOENT:
453 453 raise
454 454
455 455 if 'exp-sparse' in self.requirements and not sparse.enabled:
456 456 raise error.RepoError(_('repository is using sparse feature but '
457 457 'sparse is not enabled; enable the '
458 458 '"sparse" extensions to access'))
459 459
460 460 self.store = store.store(
461 461 self.requirements, self.sharedpath,
462 462 lambda base: vfsmod.vfs(base, cacheaudited=True))
463 463 self.spath = self.store.path
464 464 self.svfs = self.store.vfs
465 465 self.sjoin = self.store.join
466 466 self.vfs.createmode = self.store.createmode
467 467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
468 468 self.cachevfs.createmode = self.store.createmode
469 469 if (self.ui.configbool('devel', 'all-warnings') or
470 470 self.ui.configbool('devel', 'check-locks')):
471 471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
472 472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
473 473 else: # standard vfs
474 474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
475 475 self._applyopenerreqs()
476 476 if create:
477 477 self._writerequirements()
478 478
479 479 self._dirstatevalidatewarned = False
480 480
481 481 self._branchcaches = {}
482 482 self._revbranchcache = None
483 483 self.filterpats = {}
484 484 self._datafilters = {}
485 485 self._transref = self._lockref = self._wlockref = None
486 486
487 487 # A cache for various files under .hg/ that tracks file changes,
488 488 # (used by the filecache decorator)
489 489 #
490 490 # Maps a property name to its util.filecacheentry
491 491 self._filecache = {}
492 492
493 493 # hold sets of revisions to be filtered
494 494 # should be cleared when something might have changed the filter value:
495 495 # - new changesets,
496 496 # - phase change,
497 497 # - new obsolescence marker,
498 498 # - working directory parent change,
499 499 # - bookmark changes
500 500 self.filteredrevcache = {}
501 501
502 502 # post-dirstate-status hooks
503 503 self._postdsstatus = []
504 504
505 505 # Cache of types representing filtered repos.
506 506 self._filteredrepotypes = weakref.WeakKeyDictionary()
507 507
508 508 # generic mapping between names and nodes
509 509 self.names = namespaces.namespaces()
510 510
511 511 # Key to signature value.
512 512 self._sparsesignaturecache = {}
513 513 # Signature to cached matcher instance.
514 514 self._sparsematchercache = {}
515 515
516 516 def _getvfsward(self, origfunc):
517 517 """build a ward for self.vfs"""
518 518 rref = weakref.ref(self)
519 519 def checkvfs(path, mode=None):
520 520 ret = origfunc(path, mode=mode)
521 521 repo = rref()
522 522 if (repo is None
523 523 or not util.safehasattr(repo, '_wlockref')
524 524 or not util.safehasattr(repo, '_lockref')):
525 525 return
526 526 if mode in (None, 'r', 'rb'):
527 527 return
528 528 if path.startswith(repo.path):
529 529 # truncate name relative to the repository (.hg)
530 530 path = path[len(repo.path) + 1:]
531 531 if path.startswith('cache/'):
532 532 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
533 533 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
534 534 if path.startswith('journal.'):
535 535 # journal is covered by 'lock'
536 536 if repo._currentlock(repo._lockref) is None:
537 537 repo.ui.develwarn('write with no lock: "%s"' % path,
538 538 stacklevel=2, config='check-locks')
539 539 elif repo._currentlock(repo._wlockref) is None:
540 540 # rest of vfs files are covered by 'wlock'
541 541 #
542 542 # exclude special files
543 543 for prefix in self._wlockfreeprefix:
544 544 if path.startswith(prefix):
545 545 return
546 546 repo.ui.develwarn('write with no wlock: "%s"' % path,
547 547 stacklevel=2, config='check-locks')
548 548 return ret
549 549 return checkvfs
550 550
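The ward idea in miniature: wrap an audit callable so non-read accesses are checked against lock state before the result is returned. A sketch with hypothetical names; the real checkvfs above additionally special-cases cache/ and journal paths:

def makeward(origfunc, islocked, warn):
    def ward(path, mode=None):
        ret = origfunc(path, mode=mode)
        if mode not in (None, 'r', 'rb') and not islocked():
            warn('write with no lock: "%s"' % path)
        return ret
    return ward

audit = makeward(lambda path, mode=None: None, lambda: False, print)
audit('bookmarks', mode='wb')  # -> write with no lock: "bookmarks"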
551 551 def _getsvfsward(self, origfunc):
552 552 """build a ward for self.svfs"""
553 553 rref = weakref.ref(self)
554 554 def checksvfs(path, mode=None):
555 555 ret = origfunc(path, mode=mode)
556 556 repo = rref()
557 557 if repo is None or not util.safehasattr(repo, '_lockref'):
558 558 return
559 559 if mode in (None, 'r', 'rb'):
560 560 return
561 561 if path.startswith(repo.sharedpath):
562 562 # truncate name relative to the repository (.hg)
563 563 path = path[len(repo.sharedpath) + 1:]
564 564 if repo._currentlock(repo._lockref) is None:
565 565 repo.ui.develwarn('write with no lock: "%s"' % path,
566 566 stacklevel=3)
567 567 return ret
568 568 return checksvfs
569 569
570 570 def close(self):
571 571 self._writecaches()
572 572
573 573 def _loadextensions(self):
574 574 extensions.loadall(self.ui)
575 575
576 576 def _writecaches(self):
577 577 if self._revbranchcache:
578 578 self._revbranchcache.write()
579 579
580 580 def _restrictcapabilities(self, caps):
581 581 if self.ui.configbool('experimental', 'bundle2-advertise'):
582 582 caps = set(caps)
583 583 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
584 584 caps.add('bundle2=' + urlreq.quote(capsblob))
585 585 return caps
586 586
587 587 def _applyopenerreqs(self):
588 588 self.svfs.options = dict((r, 1) for r in self.requirements
589 589 if r in self.openerreqs)
590 590 # experimental config: format.chunkcachesize
591 591 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
592 592 if chunkcachesize is not None:
593 593 self.svfs.options['chunkcachesize'] = chunkcachesize
594 594 # experimental config: format.maxchainlen
595 595 maxchainlen = self.ui.configint('format', 'maxchainlen')
596 596 if maxchainlen is not None:
597 597 self.svfs.options['maxchainlen'] = maxchainlen
598 598 # experimental config: format.manifestcachesize
599 599 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
600 600 if manifestcachesize is not None:
601 601 self.svfs.options['manifestcachesize'] = manifestcachesize
602 602 # experimental config: format.aggressivemergedeltas
603 603 aggressivemergedeltas = self.ui.configbool('format',
604 604 'aggressivemergedeltas')
605 605 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
606 606 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
607 607 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
608 608 if 0 <= chainspan:
609 609 self.svfs.options['maxdeltachainspan'] = chainspan
610 610 mmapindexthreshold = self.ui.configbytes('experimental',
611 611 'mmapindexthreshold')
612 612 if mmapindexthreshold is not None:
613 613 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
614 614 withsparseread = self.ui.configbool('experimental', 'sparse-read')
615 615 srdensitythres = float(self.ui.config('experimental',
616 616 'sparse-read.density-threshold'))
617 617 srmingapsize = self.ui.configbytes('experimental',
618 618 'sparse-read.min-gap-size')
619 619 self.svfs.options['with-sparse-read'] = withsparseread
620 620 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
621 621 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
622 622
623 623 for r in self.requirements:
624 624 if r.startswith('exp-compression-'):
625 625 self.svfs.options['compengine'] = r[len('exp-compression-'):]
626 626
627 627 # TODO move "revlogv2" to openerreqs once finalized.
628 628 if REVLOGV2_REQUIREMENT in self.requirements:
629 629 self.svfs.options['revlogv2'] = True
630 630
631 631 def _writerequirements(self):
632 632 scmutil.writerequires(self.vfs, self.requirements)
633 633
634 634 def _checknested(self, path):
635 635 """Determine if path is a legal nested repository."""
636 636 if not path.startswith(self.root):
637 637 return False
638 638 subpath = path[len(self.root) + 1:]
639 639 normsubpath = util.pconvert(subpath)
640 640
641 641 # XXX: Checking against the current working copy is wrong in
642 642 # the sense that it can reject things like
643 643 #
644 644 # $ hg cat -r 10 sub/x.txt
645 645 #
646 646 # if sub/ is no longer a subrepository in the working copy
647 647 # parent revision.
648 648 #
649 649 # However, it can of course also allow things that would have
650 650 # been rejected before, such as the above cat command if sub/
651 651 # is a subrepository now, but was a normal directory before.
652 652 # The old path auditor would have rejected by mistake since it
653 653 # panics when it sees sub/.hg/.
654 654 #
655 655 # All in all, checking against the working copy seems sensible
656 656 # since we want to prevent access to nested repositories on
657 657 # the filesystem *now*.
658 658 ctx = self[None]
659 659 parts = util.splitpath(subpath)
660 660 while parts:
661 661 prefix = '/'.join(parts)
662 662 if prefix in ctx.substate:
663 663 if prefix == normsubpath:
664 664 return True
665 665 else:
666 666 sub = ctx.sub(prefix)
667 667 return sub.checknested(subpath[len(prefix) + 1:])
668 668 else:
669 669 parts.pop()
670 670 return False
671 671
672 672 def peer(self):
673 673 return localpeer(self) # not cached to avoid reference cycle
674 674
675 675 def unfiltered(self):
676 676 """Return unfiltered version of the repository
677 677
678 678 Intended to be overwritten by filtered repo."""
679 679 return self
680 680
681 681 def filtered(self, name):
682 682 """Return a filtered version of a repository"""
683 683 # Python <3.4 easily leaks types via __mro__. See
684 684 # https://bugs.python.org/issue17950. We cache dynamically
685 685 # created types so this method doesn't leak on every
686 686 # invocation.
687 687
688 688 key = self.unfiltered().__class__
689 689 if key not in self._filteredrepotypes:
690 690 # Build a new type with the repoview mixin and the base
691 691 # class of this repo. Give it a name containing the
692 692 # filter name to aid debugging.
693 693 bases = (repoview.repoview, key)
694 694 cls = type(r'%sfilteredrepo' % name, bases, {})
695 695 self._filteredrepotypes[key] = cls
696 696
697 697 return self._filteredrepotypes[key](self, name)
698 698
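A sketch of the type-caching trick: build each filtered subclass once per base class and reuse it, so repeated calls don't create fresh types (which old Pythons leak via __mro__). Mixin and base names here are hypothetical:

_cache = {}

class viewmixin(object):
    pass  # stands in for repoview.repoview

def filteredtype(base, name):
    if base not in _cache:
        _cache[base] = type(name + 'filteredrepo', (viewmixin, base), {})
    return _cache[base]

class repo(object):
    pass

assert filteredtype(repo, 'visible') is filteredtype(repo, 'visible')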
699 699 @repofilecache('bookmarks', 'bookmarks.current')
700 700 def _bookmarks(self):
701 701 return bookmarks.bmstore(self)
702 702
703 703 @property
704 704 def _activebookmark(self):
705 705 return self._bookmarks.active
706 706
707 707 # _phaserevs and _phasesets depend on changelog. what we need is to
708 708 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
709 709 # can't be easily expressed in filecache mechanism.
710 710 @storecache('phaseroots', '00changelog.i')
711 711 def _phasecache(self):
712 712 return phases.phasecache(self, self._phasedefaults)
713 713
714 714 @storecache('obsstore')
715 715 def obsstore(self):
716 716 return obsolete.makestore(self.ui, self)
717 717
718 718 @storecache('00changelog.i')
719 719 def changelog(self):
720 720 return changelog.changelog(self.svfs,
721 721 trypending=txnutil.mayhavepending(self.root))
722 722
723 723 def _constructmanifest(self):
724 724 # This is a temporary function while we migrate from manifest to
725 725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
726 726 # manifest creation.
727 727 return manifest.manifestrevlog(self.svfs)
728 728
729 729 @storecache('00manifest.i')
730 730 def manifestlog(self):
731 731 return manifest.manifestlog(self.svfs, self)
732 732
733 733 @repofilecache('dirstate')
734 734 def dirstate(self):
735 735 sparsematchfn = lambda: sparse.matcher(self)
736 736
737 737 return dirstate.dirstate(self.vfs, self.ui, self.root,
738 738 self._dirstatevalidate, sparsematchfn)
739 739
740 740 def _dirstatevalidate(self, node):
741 741 try:
742 742 self.changelog.rev(node)
743 743 return node
744 744 except error.LookupError:
745 745 if not self._dirstatevalidatewarned:
746 746 self._dirstatevalidatewarned = True
747 747 self.ui.warn(_("warning: ignoring unknown"
748 748 " working parent %s!\n") % short(node))
749 749 return nullid
750 750
751 751 def __getitem__(self, changeid):
752 752 if changeid is None:
753 753 return context.workingctx(self)
754 754 if isinstance(changeid, slice):
755 755 # wdirrev isn't contiguous so the slice shouldn't include it
756 756 return [context.changectx(self, i)
757 757 for i in xrange(*changeid.indices(len(self)))
758 758 if i not in self.changelog.filteredrevs]
759 759 try:
760 760 return context.changectx(self, changeid)
761 761 except error.WdirUnsupported:
762 762 return context.workingctx(self)
763 763
764 764 def __contains__(self, changeid):
765 765 """True if the given changeid exists
766 766
767 767 error.LookupError is raised if an ambiguous node is specified.
768 768 """
769 769 try:
770 770 self[changeid]
771 771 return True
772 772 except error.RepoLookupError:
773 773 return False
774 774
775 775 def __nonzero__(self):
776 776 return True
777 777
778 778 __bool__ = __nonzero__
779 779
780 780 def __len__(self):
781 781 return len(self.changelog)
782 782
783 783 def __iter__(self):
784 784 return iter(self.changelog)
785 785
786 786 def revs(self, expr, *args):
787 787 '''Find revisions matching a revset.
788 788
789 789 The revset is specified as a string ``expr`` that may contain
790 790 %-formatting to escape certain types. See ``revsetlang.formatspec``.
791 791
792 792 Revset aliases from the configuration are not expanded. To expand
793 793 user aliases, consider calling ``scmutil.revrange()`` or
794 794 ``repo.anyrevs([expr], user=True)``.
795 795
796 796 Returns a revset.abstractsmartset, which is a list-like interface
797 797 that contains integer revisions.
798 798 '''
799 799 expr = revsetlang.formatspec(expr, *args)
800 800 m = revset.match(None, expr)
801 801 return m(self)
802 802
803 803 def set(self, expr, *args):
804 804 '''Find revisions matching a revset and emit changectx instances.
805 805
806 806 This is a convenience wrapper around ``revs()`` that iterates the
807 807 result and is a generator of changectx instances.
808 808
809 809 Revset aliases from the configuration are not expanded. To expand
810 810 user aliases, consider calling ``scmutil.revrange()``.
811 811 '''
812 812 for r in self.revs(expr, *args):
813 813 yield self[r]
814 814
815 815 def anyrevs(self, specs, user=False, localalias=None):
816 816 '''Find revisions matching one of the given revsets.
817 817
818 818 Revset aliases from the configuration are not expanded by default. To
819 819 expand user aliases, specify ``user=True``. To provide some local
820 820 definitions overriding user aliases, set ``localalias`` to
821 821 ``{name: definitionstring}``.
822 822 '''
823 823 if user:
824 824 m = revset.matchany(self.ui, specs, repo=self,
825 825 localalias=localalias)
826 826 else:
827 827 m = revset.matchany(None, specs, localalias=localalias)
828 828 return m(self)
829 829
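Illustrative usage of the three helpers above, assuming an existing repo object and the %-formatting codes of revsetlang.formatspec (not executable standalone):

# revs = repo.revs('heads(%ld)', [1, 2, 3])      # smartset of revision ints
# for ctx in repo.set('parents(%n)', binnode):   # changectx instances
#     print(ctx.hex())
# repo.anyrevs(['tip', 'myalias'], user=True)    # expands user revset aliases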
830 830 def url(self):
831 831 return 'file:' + self.root
832 832
833 833 def hook(self, name, throw=False, **args):
834 834 """Call a hook, passing this repo instance.
835 835
836 836 This is a convenience method to aid invoking hooks. Extensions likely
837 837 won't call this unless they have registered a custom hook or are
838 838 replacing code that is expected to call a hook.
839 839 """
840 840 return hook.hook(self.ui, self, name, throw, **args)
841 841
842 842 @filteredpropertycache
843 843 def _tagscache(self):
844 844 '''Returns a tagscache object that contains various tags related
845 845 caches.'''
846 846
847 847 # This simplifies its cache management by having one decorated
848 848 # function (this one) and the rest simply fetch things from it.
849 849 class tagscache(object):
850 850 def __init__(self):
851 851 # These two define the set of tags for this repository. tags
852 852 # maps tag name to node; tagtypes maps tag name to 'global' or
853 853 # 'local'. (Global tags are defined by .hgtags across all
854 854 # heads, and local tags are defined in .hg/localtags.)
855 855 # They constitute the in-memory cache of tags.
856 856 self.tags = self.tagtypes = None
857 857
858 858 self.nodetagscache = self.tagslist = None
859 859
860 860 cache = tagscache()
861 861 cache.tags, cache.tagtypes = self._findtags()
862 862
863 863 return cache
864 864
865 865 def tags(self):
866 866 '''return a mapping of tag to node'''
867 867 t = {}
868 868 if self.changelog.filteredrevs:
869 869 tags, tt = self._findtags()
870 870 else:
871 871 tags = self._tagscache.tags
872 872 for k, v in tags.iteritems():
873 873 try:
874 874 # ignore tags to unknown nodes
875 875 self.changelog.rev(v)
876 876 t[k] = v
877 877 except (error.LookupError, ValueError):
878 878 pass
879 879 return t
880 880
881 881 def _findtags(self):
882 882 '''Do the hard work of finding tags. Return a pair of dicts
883 883 (tags, tagtypes) where tags maps tag name to node, and tagtypes
884 884 maps tag name to a string like \'global\' or \'local\'.
885 885 Subclasses or extensions are free to add their own tags, but
886 886 should be aware that the returned dicts will be retained for the
887 887 duration of the localrepo object.'''
888 888
889 889 # XXX what tagtype should subclasses/extensions use? Currently
890 890 # mq and bookmarks add tags, but do not set the tagtype at all.
891 891 # Should each extension invent its own tag type? Should there
892 892 # be one tagtype for all such "virtual" tags? Or is the status
893 893 # quo fine?
894 894
895 895
896 896 # map tag name to (node, hist)
897 897 alltags = tagsmod.findglobaltags(self.ui, self)
898 898 # map tag name to tag type
899 899 tagtypes = dict((tag, 'global') for tag in alltags)
900 900
901 901 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
902 902
903 903 # Build the return dicts. Have to re-encode tag names because
904 904 # the tags module always uses UTF-8 (in order not to lose info
905 905 # writing to the cache), but the rest of Mercurial wants them in
906 906 # local encoding.
907 907 tags = {}
908 908 for (name, (node, hist)) in alltags.iteritems():
909 909 if node != nullid:
910 910 tags[encoding.tolocal(name)] = node
911 911 tags['tip'] = self.changelog.tip()
912 912 tagtypes = dict([(encoding.tolocal(name), value)
913 913 for (name, value) in tagtypes.iteritems()])
914 914 return (tags, tagtypes)
915 915
916 916 def tagtype(self, tagname):
917 917 '''
918 918 return the type of the given tag. result can be:
919 919
920 920 'local' : a local tag
921 921 'global' : a global tag
922 922 None : tag does not exist
923 923 '''
924 924
925 925 return self._tagscache.tagtypes.get(tagname)
926 926
927 927 def tagslist(self):
928 928 '''return a list of tags ordered by revision'''
929 929 if not self._tagscache.tagslist:
930 930 l = []
931 931 for t, n in self.tags().iteritems():
932 932 l.append((self.changelog.rev(n), t, n))
933 933 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
934 934
935 935 return self._tagscache.tagslist
936 936
937 937 def nodetags(self, node):
938 938 '''return the tags associated with a node'''
939 939 if not self._tagscache.nodetagscache:
940 940 nodetagscache = {}
941 941 for t, n in self._tagscache.tags.iteritems():
942 942 nodetagscache.setdefault(n, []).append(t)
943 943 for tags in nodetagscache.itervalues():
944 944 tags.sort()
945 945 self._tagscache.nodetagscache = nodetagscache
946 946 return self._tagscache.nodetagscache.get(node, [])
947 947
948 948 def nodebookmarks(self, node):
949 949 """return the list of bookmarks pointing to the specified node"""
950 950 marks = []
951 951 for bookmark, n in self._bookmarks.iteritems():
952 952 if n == node:
953 953 marks.append(bookmark)
954 954 return sorted(marks)
955 955
956 956 def branchmap(self):
957 957 '''returns a dictionary {branch: [branchheads]} with branchheads
958 958 ordered by increasing revision number'''
959 959 branchmap.updatecache(self)
960 960 return self._branchcaches[self.filtername]
961 961
962 962 @unfilteredmethod
963 963 def revbranchcache(self):
964 964 if not self._revbranchcache:
965 965 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
966 966 return self._revbranchcache
967 967
968 968 def branchtip(self, branch, ignoremissing=False):
969 969 '''return the tip node for a given branch
970 970
971 971 If ignoremissing is True, then this method will not raise an error.
972 972 This is helpful for callers that only expect None for a missing branch
973 973 (e.g. namespace).
974 974
975 975 '''
976 976 try:
977 977 return self.branchmap().branchtip(branch)
978 978 except KeyError:
979 979 if not ignoremissing:
980 980 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
981 981 else:
982 982 pass
983 983
984 984 def lookup(self, key):
985 985 return self[key].node()
986 986
987 987 def lookupbranch(self, key, remote=None):
988 988 repo = remote or self
989 989 if key in repo.branchmap():
990 990 return key
991 991
992 992 repo = (remote and remote.local()) and remote or self
993 993 return repo[key].branch()
994 994
995 995 def known(self, nodes):
996 996 cl = self.changelog
997 997 nm = cl.nodemap
998 998 filtered = cl.filteredrevs
999 999 result = []
1000 1000 for n in nodes:
1001 1001 r = nm.get(n)
1002 1002 resp = not (r is None or r in filtered)
1003 1003 result.append(resp)
1004 1004 return result
1005 1005
1006 1006 def local(self):
1007 1007 return self
1008 1008
1009 1009 def publishing(self):
1010 1010 # it's safe (and desirable) to trust the publish flag unconditionally
1011 1011 # so that we don't finalize changes shared between users via ssh or nfs
1012 1012 return self.ui.configbool('phases', 'publish', untrusted=True)
1013 1013
1014 1014 def cancopy(self):
1015 1015 # so statichttprepo's override of local() works
1016 1016 if not self.local():
1017 1017 return False
1018 1018 if not self.publishing():
1019 1019 return True
1020 1020 # if publishing we can't copy if there is filtered content
1021 1021 return not self.filtered('visible').changelog.filteredrevs
1022 1022
1023 1023 def shared(self):
1024 1024 '''the type of shared repository (None if not shared)'''
1025 1025 if self.sharedpath != self.path:
1026 1026 return 'store'
1027 1027 return None
1028 1028
1029 1029 def wjoin(self, f, *insidef):
1030 1030 return self.vfs.reljoin(self.root, f, *insidef)
1031 1031
1032 1032 def file(self, f):
1033 1033 if f[0] == '/':
1034 1034 f = f[1:]
1035 1035 return filelog.filelog(self.svfs, f)
1036 1036
1037 1037 def changectx(self, changeid):
1038 1038 return self[changeid]
1039 1039
1040 1040 def setparents(self, p1, p2=nullid):
1041 1041 with self.dirstate.parentchange():
1042 1042 copies = self.dirstate.setparents(p1, p2)
1043 1043 pctx = self[p1]
1044 1044 if copies:
1045 1045 # Adjust copy records; the dirstate cannot do it, as it
1046 1046 # requires access to the parents' manifests. Preserve them
1047 1047 # only for entries added to the first parent.
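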
1048 1048 for f in copies:
1049 1049 if f not in pctx and copies[f] in pctx:
1050 1050 self.dirstate.copy(copies[f], f)
1051 1051 if p2 == nullid:
1052 1052 for f, s in sorted(self.dirstate.copies().items()):
1053 1053 if f not in pctx and s not in pctx:
1054 1054 self.dirstate.copy(None, f)
1055 1055
1056 1056 def filectx(self, path, changeid=None, fileid=None):
1057 1057 """changeid can be a changeset revision, node, or tag.
1058 1058 fileid can be a file revision or node."""
1059 1059 return context.filectx(self, path, changeid, fileid)
1060 1060
1061 1061 def getcwd(self):
1062 1062 return self.dirstate.getcwd()
1063 1063
1064 1064 def pathto(self, f, cwd=None):
1065 1065 return self.dirstate.pathto(f, cwd)
1066 1066
1067 1067 def _loadfilter(self, filter):
1068 1068 if filter not in self.filterpats:
1069 1069 l = []
1070 1070 for pat, cmd in self.ui.configitems(filter):
1071 1071 if cmd == '!':
1072 1072 continue
1073 1073 mf = matchmod.match(self.root, '', [pat])
1074 1074 fn = None
1075 1075 params = cmd
1076 1076 for name, filterfn in self._datafilters.iteritems():
1077 1077 if cmd.startswith(name):
1078 1078 fn = filterfn
1079 1079 params = cmd[len(name):].lstrip()
1080 1080 break
1081 1081 if not fn:
1082 1082 fn = lambda s, c, **kwargs: util.filter(s, c)
1083 1083 # Wrap old filters not supporting keyword arguments
1084 1084 if not inspect.getargspec(fn)[2]:
1085 1085 oldfn = fn
1086 1086 fn = lambda s, c, **kwargs: oldfn(s, c)
1087 1087 l.append((mf, fn, params))
1088 1088 self.filterpats[filter] = l
1089 1089 return self.filterpats[filter]
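# The patterns iterated above come from the matching hgrc section
# ([encode] or [decode]). A sketch (the dos2unix command is an assumption;
# plain values run as shell filters reading stdin and writing stdout,
# while values whose prefix matches a name registered through
# adddatafilter() use that Python filter instead):
#
#   [encode]
#   **.txt = dos2unix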
1090 1090
1091 1091 def _filter(self, filterpats, filename, data):
1092 1092 for mf, fn, cmd in filterpats:
1093 1093 if mf(filename):
1094 1094 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1095 1095 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1096 1096 break
1097 1097
1098 1098 return data
1099 1099
1100 1100 @unfilteredpropertycache
1101 1101 def _encodefilterpats(self):
1102 1102 return self._loadfilter('encode')
1103 1103
1104 1104 @unfilteredpropertycache
1105 1105 def _decodefilterpats(self):
1106 1106 return self._loadfilter('decode')
1107 1107
1108 1108 def adddatafilter(self, name, filter):
1109 1109 self._datafilters[name] = filter
1110 1110
1111 1111 def wread(self, filename):
1112 1112 if self.wvfs.islink(filename):
1113 1113 data = self.wvfs.readlink(filename)
1114 1114 else:
1115 1115 data = self.wvfs.read(filename)
1116 1116 return self._filter(self._encodefilterpats, filename, data)
1117 1117
1118 1118 def wwrite(self, filename, data, flags, backgroundclose=False):
1119 1119 """write ``data`` into ``filename`` in the working directory
1120 1120
1121 1121 This returns length of written (maybe decoded) data.
1122 1122 """
1123 1123 data = self._filter(self._decodefilterpats, filename, data)
1124 1124 if 'l' in flags:
1125 1125 self.wvfs.symlink(data, filename)
1126 1126 else:
1127 1127 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1128 1128 if 'x' in flags:
1129 1129 self.wvfs.setflags(filename, False, True)
1130 1130 return len(data)
1131 1131
1132 1132 def wwritedata(self, filename, data):
1133 1133 return self._filter(self._decodefilterpats, filename, data)
1134 1134
1135 1135 def currenttransaction(self):
1136 1136 """return the current transaction or None if non exists"""
1137 1137 if self._transref:
1138 1138 tr = self._transref()
1139 1139 else:
1140 1140 tr = None
1141 1141
1142 1142 if tr and tr.running():
1143 1143 return tr
1144 1144 return None
1145 1145
1146 1146 def transaction(self, desc, report=None):
1147 1147 if (self.ui.configbool('devel', 'all-warnings')
1148 1148 or self.ui.configbool('devel', 'check-locks')):
1149 1149 if self._currentlock(self._lockref) is None:
1150 1150 raise error.ProgrammingError('transaction requires locking')
1151 1151 tr = self.currenttransaction()
1152 1152 if tr is not None:
1153 1153 scmutil.registersummarycallback(self, tr, desc)
1154 1154 return tr.nest()
1155 1155
1156 1156 # abort here if the journal already exists
1157 1157 if self.svfs.exists("journal"):
1158 1158 raise error.RepoError(
1159 1159 _("abandoned transaction found"),
1160 1160 hint=_("run 'hg recover' to clean up transaction"))
1161 1161
1162 1162 idbase = "%.40f#%f" % (random.random(), time.time())
1163 1163 ha = hex(hashlib.sha1(idbase).digest())
1164 1164 txnid = 'TXN:' + ha
1165 1165 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1166 1166
1167 1167 self._writejournal(desc)
1168 1168 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1169 1169 if report:
1170 1170 rp = report
1171 1171 else:
1172 1172 rp = self.ui.warn
1173 1173 vfsmap = {'plain': self.vfs} # root of .hg/
1174 1174 # we must avoid a cyclic reference between repo and transaction.
1175 1175 reporef = weakref.ref(self)
1176 1176 # Code to track tag movement
1177 1177 #
1178 1178 # Since tags are all handled as file content, it is actually quite hard
1179 1179 # to track these movements from a code perspective. So we fall back to
1180 1180 # tracking at the repository level. One could envision tracking changes
1181 1181 # to the '.hgtags' file through changegroup application, but that fails
1182 1182 # to cope with cases where a transaction exposes new heads without a
1183 1183 # changegroup being involved (eg: phase movement).
1184 1184 #
1185 1185 # For now, we gate the feature behind a flag since it likely comes
1186 1186 # with a performance impact. The current code runs more often than
1187 1187 # needed and does not use caches as much as it could. The current focus
1188 1188 # is on the behavior of the feature, so we disable it by default. The
1189 1189 # flag will be removed when we are happy with the performance impact.
1190 1190 #
1191 1191 # Once this feature is no longer experimental move the following
1192 1192 # documentation to the appropriate help section:
1193 1193 #
1194 1194 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1195 1195 # tags (new or changed or deleted tags). In addition the details of
1196 1196 # these changes are made available in a file at:
1197 1197 # ``REPOROOT/.hg/changes/tags.changes``.
1198 1198 # Make sure you check for HG_TAG_MOVED before reading that file as it
1199 1199 # might exist from a previous transaction even if no tags were touched
1200 1200 # in this one. Changes are recorded in a line-based format::
1201 1201 #
1202 1202 # <action> <hex-node> <tag-name>\n
1203 1203 #
1204 1204 # Actions are defined as follows:
1205 1205 # "-R": tag is removed,
1206 1206 # "+A": tag is added,
1207 1207 # "-M": tag is moved (old value),
1208 1208 # "+M": tag is moved (new value),
1209 1209 tracktags = lambda x: None
1210 1210 # experimental config: experimental.hook-track-tags
1211 1211 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1212 1212 if desc != 'strip' and shouldtracktags:
1213 1213 oldheads = self.changelog.headrevs()
1214 1214 def tracktags(tr2):
1215 1215 repo = reporef()
1216 1216 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1217 1217 newheads = repo.changelog.headrevs()
1218 1218 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1219 1219 # note: we compare lists here.
1220 1220 # As we do it only once, building a set would not be cheaper.
1221 1221 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1222 1222 if changes:
1223 1223 tr2.hookargs['tag_moved'] = '1'
1224 1224 with repo.vfs('changes/tags.changes', 'w',
1225 1225 atomictemp=True) as changesfile:
1226 1226 # note: we do not register the file with the transaction
1227 1227 # because we need it to still exist once the transaction
1228 1228 # is closed (for txnclose hooks)
1229 1229 tagsmod.writediff(changesfile, changes)
1230 1230 def validate(tr2):
1231 1231 """will run pre-closing hooks"""
1232 1232 # XXX the transaction API is a bit lacking here so we take a hacky
1233 1233 # path for now
1234 1234 #
1235 1235 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1236 1236 # dict is copied before these run. In addition we need the data
1237 1237 # available to in-memory hooks too.
1238 1238 #
1239 1239 # Moreover, we also need to make sure this runs before txnclose
1240 1240 # hooks and there is no "pending" mechanism that would execute
1241 1241 # logic only if hooks are about to run.
1242 1242 #
1243 1243 # Fixing this limitation of the transaction is also needed to track
1244 1244 # other families of changes (bookmarks, phases, obsolescence).
1245 1245 #
1246 1246 # This will have to be fixed before we remove the experimental
1247 1247 # gating.
1248 1248 tracktags(tr2)
1249 1249 repo = reporef()
1250 1250 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1251 1251 scmutil.enforcesinglehead(repo, tr2, desc)
1252 1252 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1253 1253 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1254 1254 args = tr.hookargs.copy()
1255 1255 args.update(bookmarks.preparehookargs(name, old, new))
1256 1256 repo.hook('pretxnclose-bookmark', throw=True,
1257 1257 txnname=desc,
1258 1258 **pycompat.strkwargs(args))
1259 1259 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1260 1260 cl = repo.unfiltered().changelog
1261 1261 for rev, (old, new) in tr.changes['phases'].items():
1262 1262 args = tr.hookargs.copy()
1263 1263 node = hex(cl.node(rev))
1264 1264 args.update(phases.preparehookargs(node, old, new))
1265 1265 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1266 1266 **pycompat.strkwargs(args))
1267 1267
1268 1268 repo.hook('pretxnclose', throw=True,
1269 1269 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1270 1270 def releasefn(tr, success):
1271 1271 repo = reporef()
1272 1272 if success:
1273 1273 # this should be explicitly invoked here, because
1274 1274 # in-memory changes aren't written out at closing
1275 1275 # transaction, if tr.addfilegenerator (via
1276 1276 # dirstate.write or so) isn't invoked while
1277 1277 # transaction running
1278 1278 repo.dirstate.write(None)
1279 1279 else:
1280 1280 # discard all changes (including ones already written
1281 1281 # out) in this transaction
1282 1282 repo.dirstate.restorebackup(None, 'journal.dirstate')
1283 1283
1284 1284 repo.invalidate(clearfilecache=True)
1285 1285
1286 1286 tr = transaction.transaction(rp, self.svfs, vfsmap,
1287 1287 "journal",
1288 1288 "undo",
1289 1289 aftertrans(renames),
1290 1290 self.store.createmode,
1291 1291 validator=validate,
1292 1292 releasefn=releasefn,
1293 1293 checkambigfiles=_cachedfiles)
1294 1294 tr.changes['revs'] = set()
1295 1295 tr.changes['obsmarkers'] = set()
1296 1296 tr.changes['phases'] = {}
1297 1297 tr.changes['bookmarks'] = {}
1298 1298
1299 1299 tr.hookargs['txnid'] = txnid
1300 1300 # note: writing the fncache only during finalize means that the file is
1301 1301 # outdated when running hooks. As fncache is used for streaming clone,
1302 1302 # this is not expected to break anything that happens during the hooks.
1303 1303 tr.addfinalize('flush-fncache', self.store.write)
1304 1304 def txnclosehook(tr2):
1305 1305 """To be run if transaction is successful, will schedule a hook run
1306 1306 """
1307 1307 # Don't reference tr2 in hook() so we don't hold a reference.
1308 1308 # This reduces memory consumption when there are multiple
1309 1309 # transactions per lock. This can likely go away if issue5045
1310 1310 # fixes the function accumulation.
1311 1311 hookargs = tr2.hookargs
1312 1312
1313 1313 def hookfunc():
1314 1314 repo = reporef()
1315 1315 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1316 1316 bmchanges = sorted(tr.changes['bookmarks'].items())
1317 1317 for name, (old, new) in bmchanges:
1318 1318 args = tr.hookargs.copy()
1319 1319 args.update(bookmarks.preparehookargs(name, old, new))
1320 1320 repo.hook('txnclose-bookmark', throw=False,
1321 1321 txnname=desc, **pycompat.strkwargs(args))
1322 1322
1323 1323 if hook.hashook(repo.ui, 'txnclose-phase'):
1324 1324 cl = repo.unfiltered().changelog
1325 1325 phasemv = sorted(tr.changes['phases'].items())
1326 1326 for rev, (old, new) in phasemv:
1327 1327 args = tr.hookargs.copy()
1328 1328 node = hex(cl.node(rev))
1329 1329 args.update(phases.preparehookargs(node, old, new))
1330 1330 repo.hook('txnclose-phase', throw=False, txnname=desc,
1331 1331 **pycompat.strkwargs(args))
1332 1332
1333 1333 repo.hook('txnclose', throw=False, txnname=desc,
1334 1334 **pycompat.strkwargs(hookargs))
1335 1335 reporef()._afterlock(hookfunc)
1336 1336 tr.addfinalize('txnclose-hook', txnclosehook)
1337 1337 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1338 1338 def txnaborthook(tr2):
1339 1339 """To be run if transaction is aborted
1340 1340 """
1341 1341 reporef().hook('txnabort', throw=False, txnname=desc,
1342 1342 **tr2.hookargs)
1343 1343 tr.addabort('txnabort-hook', txnaborthook)
1344 1344 # avoid eager cache invalidation. in-memory data should be identical
1345 1345 # to stored data if transaction has no error.
1346 1346 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1347 1347 self._transref = weakref.ref(tr)
1348 1348 scmutil.registersummarycallback(self, tr, desc)
1349 1349 return tr
1350 1350
1351 1351 def _journalfiles(self):
1352 1352 return ((self.svfs, 'journal'),
1353 1353 (self.vfs, 'journal.dirstate'),
1354 1354 (self.vfs, 'journal.branch'),
1355 1355 (self.vfs, 'journal.desc'),
1356 1356 (self.vfs, 'journal.bookmarks'),
1357 1357 (self.svfs, 'journal.phaseroots'))
1358 1358
1359 1359 def undofiles(self):
1360 1360 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
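# The mapping between journal and undo files is mechanical: undofiles()
# applies undoname() to each entry, so e.g. (self.vfs, 'journal.dirstate')
# becomes (self.vfs, 'undo.dirstate') once aftertrans() renames it at the
# end of a successful transaction.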
1361 1361
1362 1362 @unfilteredmethod
1363 1363 def _writejournal(self, desc):
1364 1364 self.dirstate.savebackup(None, 'journal.dirstate')
1365 1365 self.vfs.write("journal.branch",
1366 1366 encoding.fromlocal(self.dirstate.branch()))
1367 1367 self.vfs.write("journal.desc",
1368 1368 "%d\n%s\n" % (len(self), desc))
1369 1369 self.vfs.write("journal.bookmarks",
1370 1370 self.vfs.tryread("bookmarks"))
1371 1371 self.svfs.write("journal.phaseroots",
1372 1372 self.svfs.tryread("phaseroots"))
1373 1373
1374 1374 def recover(self):
1375 1375 with self.lock():
1376 1376 if self.svfs.exists("journal"):
1377 1377 self.ui.status(_("rolling back interrupted transaction\n"))
1378 1378 vfsmap = {'': self.svfs,
1379 1379 'plain': self.vfs,}
1380 1380 transaction.rollback(self.svfs, vfsmap, "journal",
1381 1381 self.ui.warn,
1382 1382 checkambigfiles=_cachedfiles)
1383 1383 self.invalidate()
1384 1384 return True
1385 1385 else:
1386 1386 self.ui.warn(_("no interrupted transaction available\n"))
1387 1387 return False
1388 1388
1389 1389 def rollback(self, dryrun=False, force=False):
1390 1390 wlock = lock = dsguard = None
1391 1391 try:
1392 1392 wlock = self.wlock()
1393 1393 lock = self.lock()
1394 1394 if self.svfs.exists("undo"):
1395 1395 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1396 1396
1397 1397 return self._rollback(dryrun, force, dsguard)
1398 1398 else:
1399 1399 self.ui.warn(_("no rollback information available\n"))
1400 1400 return 1
1401 1401 finally:
1402 1402 release(dsguard, lock, wlock)
1403 1403
1404 1404 @unfilteredmethod # Until we get smarter cache management
1405 1405 def _rollback(self, dryrun, force, dsguard):
1406 1406 ui = self.ui
1407 1407 try:
1408 1408 args = self.vfs.read('undo.desc').splitlines()
1409 1409 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1410 1410 if len(args) >= 3:
1411 1411 detail = args[2]
1412 1412 oldtip = oldlen - 1
1413 1413
1414 1414 if detail and ui.verbose:
1415 1415 msg = (_('repository tip rolled back to revision %d'
1416 1416 ' (undo %s: %s)\n')
1417 1417 % (oldtip, desc, detail))
1418 1418 else:
1419 1419 msg = (_('repository tip rolled back to revision %d'
1420 1420 ' (undo %s)\n')
1421 1421 % (oldtip, desc))
1422 1422 except IOError:
1423 1423 msg = _('rolling back unknown transaction\n')
1424 1424 desc = None
1425 1425
1426 1426 if not force and self['.'] != self['tip'] and desc == 'commit':
1427 1427 raise error.Abort(
1428 1428 _('rollback of last commit while not checked out '
1429 1429 'may lose data'), hint=_('use -f to force'))
1430 1430
1431 1431 ui.status(msg)
1432 1432 if dryrun:
1433 1433 return 0
1434 1434
1435 1435 parents = self.dirstate.parents()
1436 1436 self.destroying()
1437 1437 vfsmap = {'plain': self.vfs, '': self.svfs}
1438 1438 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1439 1439 checkambigfiles=_cachedfiles)
1440 1440 if self.vfs.exists('undo.bookmarks'):
1441 1441 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1442 1442 if self.svfs.exists('undo.phaseroots'):
1443 1443 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1444 1444 self.invalidate()
1445 1445
1446 1446 parentgone = (parents[0] not in self.changelog.nodemap or
1447 1447 parents[1] not in self.changelog.nodemap)
1448 1448 if parentgone:
1449 1449 # prevent dirstateguard from overwriting already restored one
1450 1450 dsguard.close()
1451 1451
1452 1452 self.dirstate.restorebackup(None, 'undo.dirstate')
1453 1453 try:
1454 1454 branch = self.vfs.read('undo.branch')
1455 1455 self.dirstate.setbranch(encoding.tolocal(branch))
1456 1456 except IOError:
1457 1457 ui.warn(_('named branch could not be reset: '
1458 1458 'current branch is still \'%s\'\n')
1459 1459 % self.dirstate.branch())
1460 1460
1461 1461 parents = tuple([p.rev() for p in self[None].parents()])
1462 1462 if len(parents) > 1:
1463 1463 ui.status(_('working directory now based on '
1464 1464 'revisions %d and %d\n') % parents)
1465 1465 else:
1466 1466 ui.status(_('working directory now based on '
1467 1467 'revision %d\n') % parents)
1468 1468 mergemod.mergestate.clean(self, self['.'].node())
1469 1469
1470 1470 # TODO: if we know which new heads may result from this rollback, pass
1471 1471 # them to destroy(), which will prevent the branchhead cache from being
1472 1472 # invalidated.
1473 1473 self.destroyed()
1474 1474 return 0
1475 1475
1476 1476 def _buildcacheupdater(self, newtransaction):
1477 1477 """called during transaction to build the callback updating cache
1478 1478
1479 1479 Lives on the repository to help extensions that might want to augment
1480 1480 this logic. For this purpose, the created transaction is passed to the
1481 1481 method.
1482 1482 """
1483 1483 # we must avoid a cyclic reference between repo and transaction.
1484 1484 reporef = weakref.ref(self)
1485 1485 def updater(tr):
1486 1486 repo = reporef()
1487 1487 repo.updatecaches(tr)
1488 1488 return updater
1489 1489
1490 1490 @unfilteredmethod
1491 1491 def updatecaches(self, tr=None):
1492 1492 """warm appropriate caches
1493 1493
1494 1494 If this function is called after a transaction closed, the transaction
1495 1495 will be available in the 'tr' argument. This can be used to selectively
1496 1496 update caches relevant to the changes in that transaction.
1497 1497 """
1498 1498 if tr is not None and tr.hookargs.get('source') == 'strip':
1499 1499 # During strip, many caches are invalid but
1500 1500 # later call to `destroyed` will refresh them.
1501 1501 return
1502 1502
1503 1503 if tr is None or tr.changes['revs']:
1504 1504 # updating the unfiltered branchmap should refresh all the others.
1505 1505 self.ui.debug('updating the branch cache\n')
1506 1506 branchmap.updatecache(self.filtered('served'))
1507 1507
1508 1508 def invalidatecaches(self):
1509 1509
1510 1510 if '_tagscache' in vars(self):
1511 1511 # can't use delattr on proxy
1512 1512 del self.__dict__['_tagscache']
1513 1513
1514 1514 self.unfiltered()._branchcaches.clear()
1515 1515 self.invalidatevolatilesets()
1516 1516 self._sparsesignaturecache.clear()
1517 1517
1518 1518 def invalidatevolatilesets(self):
1519 1519 self.filteredrevcache.clear()
1520 1520 obsolete.clearobscaches(self)
1521 1521
1522 1522 def invalidatedirstate(self):
1523 1523 '''Invalidates the dirstate, causing the next call to dirstate
1524 1524 to check if it was modified since the last time it was read,
1525 1525 rereading it if it has.
1526 1526
1527 1527 This is different from dirstate.invalidate() in that it doesn't always
1528 1528 reread the dirstate. Use dirstate.invalidate() if you want to
1529 1529 explicitly read the dirstate again (i.e. restoring it to a previous
1530 1530 known good state).'''
1531 1531 if hasunfilteredcache(self, 'dirstate'):
1532 1532 for k in self.dirstate._filecache:
1533 1533 try:
1534 1534 delattr(self.dirstate, k)
1535 1535 except AttributeError:
1536 1536 pass
1537 1537 delattr(self.unfiltered(), 'dirstate')
1538 1538
1539 1539 def invalidate(self, clearfilecache=False):
1540 1540 '''Invalidates both store and non-store parts other than dirstate
1541 1541
1542 1542 If a transaction is running, invalidation of store is omitted,
1543 1543 because discarding in-memory changes might cause inconsistency
1544 1544 (e.g. incomplete fncache causes unintentional failure, but
1545 1545 redundant one doesn't).
1546 1546 '''
1547 1547 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1548 1548 for k in list(self._filecache.keys()):
1549 1549 # dirstate is invalidated separately in invalidatedirstate()
1550 1550 if k == 'dirstate':
1551 1551 continue
1552 1552 if (k == 'changelog' and
1553 1553 self.currenttransaction() and
1554 1554 self.changelog._delayed):
1555 1555 # The changelog object may store unwritten revisions. We don't
1556 1556 # want to lose them.
1557 1557 # TODO: Solve the problem instead of working around it.
1558 1558 continue
1559 1559
1560 1560 if clearfilecache:
1561 1561 del self._filecache[k]
1562 1562 try:
1563 1563 delattr(unfiltered, k)
1564 1564 except AttributeError:
1565 1565 pass
1566 1566 self.invalidatecaches()
1567 1567 if not self.currenttransaction():
1568 1568 # TODO: Changing contents of store outside transaction
1569 1569 # causes inconsistency. We should make in-memory store
1570 1570 # changes detectable, and abort if changed.
1571 1571 self.store.invalidatecaches()
1572 1572
1573 1573 def invalidateall(self):
1574 1574 '''Fully invalidates both store and non-store parts, causing the
1575 1575 subsequent operation to reread any outside changes.'''
1576 1576 # extension should hook this to invalidate its caches
1577 1577 self.invalidate()
1578 1578 self.invalidatedirstate()
1579 1579
1580 1580 @unfilteredmethod
1581 1581 def _refreshfilecachestats(self, tr):
1582 1582 """Reload stats of cached files so that they are flagged as valid"""
1583 1583 for k, ce in self._filecache.items():
1584 1584 if k == 'dirstate' or k not in self.__dict__:
1585 1585 continue
1586 1586 ce.refresh()
1587 1587
1588 1588 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1589 1589 inheritchecker=None, parentenvvar=None):
1590 1590 parentlock = None
1591 1591 # the contents of parentenvvar are used by the underlying lock to
1592 1592 # determine whether it can be inherited
1593 1593 if parentenvvar is not None:
1594 1594 parentlock = encoding.environ.get(parentenvvar)
1595 1595 try:
1596 1596 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1597 1597 acquirefn=acquirefn, desc=desc,
1598 1598 inheritchecker=inheritchecker,
1599 1599 parentlock=parentlock)
1600 1600 except error.LockHeld as inst:
1601 1601 if not wait:
1602 1602 raise
1603 1603 # show more details for new-style locks
1604 1604 if ':' in inst.locker:
1605 1605 host, pid = inst.locker.split(":", 1)
1606 1606 self.ui.warn(
1607 1607 _("waiting for lock on %s held by process %r "
1608 1608 "on host %r\n") % (desc, pid, host))
1609 1609 else:
1610 1610 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1611 1611 (desc, inst.locker))
1612 1612 # default to 600 seconds timeout
1613 1613 l = lockmod.lock(vfs, lockname,
1614 1614 self.ui.configint("ui", "timeout"),
1615 1615 releasefn=releasefn, acquirefn=acquirefn,
1616 1616 desc=desc)
1617 1617 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1618 1618 return l
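# The retry above waits up to ui.timeout seconds (600 by default). For
# example, this hgrc would shorten the wait to 300 seconds:
#
#   [ui]
#   timeout = 300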
1619 1619
1620 1620 def _afterlock(self, callback):
1621 1621 """add a callback to be run when the repository is fully unlocked
1622 1622
1623 1623 The callback will be executed when the outermost lock is released
1624 1624 (with wlock being higher level than 'lock')."""
1625 1625 for ref in (self._wlockref, self._lockref):
1626 1626 l = ref and ref()
1627 1627 if l and l.held:
1628 1628 l.postrelease.append(callback)
1629 1629 break
1630 1630 else: # no lock has been found.
1631 1631 callback()
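# Usage sketch (the hook name is hypothetical): defer work until the
# repository is fully unlocked, running it immediately when no lock is
# held:
#
#   repo._afterlock(lambda: repo.hook('myhook', throw=False))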
1632 1632
1633 1633 def lock(self, wait=True):
1634 1634 '''Lock the repository store (.hg/store) and return a weak reference
1635 1635 to the lock. Use this before modifying the store (e.g. committing or
1636 1636 stripping). If you are opening a transaction, get a lock as well.
1637 1637
1638 1638 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1639 1639 'wlock' first to avoid a dead-lock hazard.'''
1640 1640 l = self._currentlock(self._lockref)
1641 1641 if l is not None:
1642 1642 l.lock()
1643 1643 return l
1644 1644
1645 1645 l = self._lock(self.svfs, "lock", wait, None,
1646 1646 self.invalidate, _('repository %s') % self.origroot)
1647 1647 self._lockref = weakref.ref(l)
1648 1648 return l
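# Typical acquisition order (a sketch; 'my-operation' is a hypothetical
# transaction description):
#
#   with repo.wlock(), repo.lock():
#       with repo.transaction('my-operation') as tr:
#           ...  # mutate the store under both locks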
1649 1649
1650 1650 def _wlockchecktransaction(self):
1651 1651 if self.currenttransaction() is not None:
1652 1652 raise error.LockInheritanceContractViolation(
1653 1653 'wlock cannot be inherited in the middle of a transaction')
1654 1654
1655 1655 def wlock(self, wait=True):
1656 1656 '''Lock the non-store parts of the repository (everything under
1657 1657 .hg except .hg/store) and return a weak reference to the lock.
1658 1658
1659 1659 Use this before modifying files in .hg.
1660 1660
1661 1661 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1662 1662 'wlock' first to avoid a dead-lock hazard.'''
1663 1663 l = self._wlockref and self._wlockref()
1664 1664 if l is not None and l.held:
1665 1665 l.lock()
1666 1666 return l
1667 1667
1668 1668 # We do not need to check for non-waiting lock acquisition. Such
1669 1669 # acquisition would not cause a dead-lock, as it would just fail.
1670 1670 if wait and (self.ui.configbool('devel', 'all-warnings')
1671 1671 or self.ui.configbool('devel', 'check-locks')):
1672 1672 if self._currentlock(self._lockref) is not None:
1673 1673 self.ui.develwarn('"wlock" acquired after "lock"')
1674 1674
1675 1675 def unlock():
1676 1676 if self.dirstate.pendingparentchange():
1677 1677 self.dirstate.invalidate()
1678 1678 else:
1679 1679 self.dirstate.write(None)
1680 1680
1681 1681 self._filecache['dirstate'].refresh()
1682 1682
1683 1683 l = self._lock(self.vfs, "wlock", wait, unlock,
1684 1684 self.invalidatedirstate, _('working directory of %s') %
1685 1685 self.origroot,
1686 1686 inheritchecker=self._wlockchecktransaction,
1687 1687 parentenvvar='HG_WLOCK_LOCKER')
1688 1688 self._wlockref = weakref.ref(l)
1689 1689 return l
1690 1690
1691 1691 def _currentlock(self, lockref):
1692 1692 """Returns the lock if it's held, or None if it's not."""
1693 1693 if lockref is None:
1694 1694 return None
1695 1695 l = lockref()
1696 1696 if l is None or not l.held:
1697 1697 return None
1698 1698 return l
1699 1699
1700 1700 def currentwlock(self):
1701 1701 """Returns the wlock if it's held, or None if it's not."""
1702 1702 return self._currentlock(self._wlockref)
1703 1703
1704 1704 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1705 1705 """
1706 1706 commit an individual file as part of a larger transaction
1707 1707 """
1708 1708
1709 1709 fname = fctx.path()
1710 1710 fparent1 = manifest1.get(fname, nullid)
1711 1711 fparent2 = manifest2.get(fname, nullid)
1712 1712 if isinstance(fctx, context.filectx):
1713 1713 node = fctx.filenode()
1714 1714 if node in [fparent1, fparent2]:
1715 1715 self.ui.debug('reusing %s filelog entry\n' % fname)
1716 1716 if manifest1.flags(fname) != fctx.flags():
1717 1717 changelist.append(fname)
1718 1718 return node
1719 1719
1720 1720 flog = self.file(fname)
1721 1721 meta = {}
1722 1722 copy = fctx.renamed()
1723 1723 if copy and copy[0] != fname:
1724 1724 # Mark the new revision of this file as a copy of another
1725 1725 # file. This copy data will effectively act as a parent
1726 1726 # of this new revision. If this is a merge, the first
1727 1727 # parent will be the nullid (meaning "look up the copy data")
1728 1728 # and the second one will be the other parent. For example:
1729 1729 #
1730 1730 # 0 --- 1 --- 3 rev1 changes file foo
1731 1731 # \ / rev2 renames foo to bar and changes it
1732 1732 # \- 2 -/ rev3 should have bar with all changes and
1733 1733 # should record that bar descends from
1734 1734 # bar in rev2 and foo in rev1
1735 1735 #
1736 1736 # this allows this merge to succeed:
1737 1737 #
1738 1738 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1739 1739 # \ / merging rev3 and rev4 should use bar@rev2
1740 1740 # \- 2 --- 4 as the merge base
1741 1741 #
1742 1742
1743 1743 cfname = copy[0]
1744 1744 crev = manifest1.get(cfname)
1745 1745 newfparent = fparent2
1746 1746
1747 1747 if manifest2: # branch merge
1748 1748 if fparent2 == nullid or crev is None: # copied on remote side
1749 1749 if cfname in manifest2:
1750 1750 crev = manifest2[cfname]
1751 1751 newfparent = fparent1
1752 1752
1753 1753 # Here, we used to search backwards through history to try to find
1754 1754 # where the file copy came from if the source of a copy was not in
1755 1755 # the parent directory. However, this doesn't actually make sense to
1756 1756 # do (what does a copy from something not in your working copy even
1757 1757 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1758 1758 # the user that copy information was dropped, so if they didn't
1759 1759 # expect this outcome it can be fixed, but this is the correct
1760 1760 # behavior in this circumstance.
1761 1761
1762 1762 if crev:
1763 1763 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1764 1764 meta["copy"] = cfname
1765 1765 meta["copyrev"] = hex(crev)
1766 1766 fparent1, fparent2 = nullid, newfparent
1767 1767 else:
1768 1768 self.ui.warn(_("warning: can't find ancestor for '%s' "
1769 1769 "copied from '%s'!\n") % (fname, cfname))
1770 1770
1771 1771 elif fparent1 == nullid:
1772 1772 fparent1, fparent2 = fparent2, nullid
1773 1773 elif fparent2 != nullid:
1774 1774 # is one parent an ancestor of the other?
1775 1775 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1776 1776 if fparent1 in fparentancestors:
1777 1777 fparent1, fparent2 = fparent2, nullid
1778 1778 elif fparent2 in fparentancestors:
1779 1779 fparent2 = nullid
1780 1780
1781 1781 # is the file changed?
1782 1782 text = fctx.data()
1783 1783 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1784 1784 changelist.append(fname)
1785 1785 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1786 1786 # are just the flags changed during merge?
1787 1787 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1788 1788 changelist.append(fname)
1789 1789
1790 1790 return fparent1
1791 1791
1792 1792 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1793 1793 """check for commit arguments that aren't committable"""
1794 1794 if match.isexact() or match.prefix():
1795 1795 matched = set(status.modified + status.added + status.removed)
1796 1796
1797 1797 for f in match.files():
1798 1798 f = self.dirstate.normalize(f)
1799 1799 if f == '.' or f in matched or f in wctx.substate:
1800 1800 continue
1801 1801 if f in status.deleted:
1802 1802 fail(f, _('file not found!'))
1803 1803 if f in vdirs: # visited directory
1804 1804 d = f + '/'
1805 1805 for mf in matched:
1806 1806 if mf.startswith(d):
1807 1807 break
1808 1808 else:
1809 1809 fail(f, _("no match under directory!"))
1810 1810 elif f not in self.dirstate:
1811 1811 fail(f, _("file not tracked!"))
1812 1812
1813 1813 @unfilteredmethod
1814 1814 def commit(self, text="", user=None, date=None, match=None, force=False,
1815 1815 editor=False, extra=None):
1816 1816 """Add a new revision to current repository.
1817 1817
1818 1818 Revision information is gathered from the working directory,
1819 1819 match can be used to filter the committed files. If editor is
1820 1820 supplied, it is called to get a commit message.
1821 1821 """
1822 1822 if extra is None:
1823 1823 extra = {}
1824 1824
1825 1825 def fail(f, msg):
1826 1826 raise error.Abort('%s: %s' % (f, msg))
1827 1827
1828 1828 if not match:
1829 1829 match = matchmod.always(self.root, '')
1830 1830
1831 1831 if not force:
1832 1832 vdirs = []
1833 1833 match.explicitdir = vdirs.append
1834 1834 match.bad = fail
1835 1835
1836 1836 wlock = lock = tr = None
1837 1837 try:
1838 1838 wlock = self.wlock()
1839 1839 lock = self.lock() # for recent changelog (see issue4368)
1840 1840
1841 1841 wctx = self[None]
1842 1842 merge = len(wctx.parents()) > 1
1843 1843
1844 1844 if not force and merge and not match.always():
1845 1845 raise error.Abort(_('cannot partially commit a merge '
1846 1846 '(do not specify files or patterns)'))
1847 1847
1848 1848 status = self.status(match=match, clean=force)
1849 1849 if force:
1850 1850 status.modified.extend(status.clean) # mq may commit clean files
1851 1851
1852 1852 # check subrepos
1853 1853 subs, commitsubs, newstate = subrepo.precommit(
1854 1854 self.ui, wctx, status, match, force=force)
1855 1855
1856 1856 # make sure all explicit patterns are matched
1857 1857 if not force:
1858 1858 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1859 1859
1860 1860 cctx = context.workingcommitctx(self, status,
1861 1861 text, user, date, extra)
1862 1862
1863 1863 # internal config: ui.allowemptycommit
1864 1864 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1865 1865 or extra.get('close') or merge or cctx.files()
1866 1866 or self.ui.configbool('ui', 'allowemptycommit'))
1867 1867 if not allowemptycommit:
1868 1868 return None
1869 1869
1870 1870 if merge and cctx.deleted():
1871 1871 raise error.Abort(_("cannot commit merge with missing files"))
1872 1872
1873 1873 ms = mergemod.mergestate.read(self)
1874 1874 mergeutil.checkunresolved(ms)
1875 1875
1876 1876 if editor:
1877 1877 cctx._text = editor(self, cctx, subs)
1878 1878 edited = (text != cctx._text)
1879 1879
1880 1880 # Save commit message in case this transaction gets rolled back
1881 1881 # (e.g. by a pretxncommit hook). Leave the content alone on
1882 1882 # the assumption that the user will use the same editor again.
1883 1883 msgfn = self.savecommitmessage(cctx._text)
1884 1884
1885 1885 # commit subs and write new state
1886 1886 if subs:
1887 1887 for s in sorted(commitsubs):
1888 1888 sub = wctx.sub(s)
1889 1889 self.ui.status(_('committing subrepository %s\n') %
1890 1890 subrepo.subrelpath(sub))
1891 1891 sr = sub.commit(cctx._text, user, date)
1892 1892 newstate[s] = (newstate[s][0], sr)
1893 1893 subrepo.writestate(self, newstate)
1894 1894
1895 1895 p1, p2 = self.dirstate.parents()
1896 1896 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1897 1897 try:
1898 1898 self.hook("precommit", throw=True, parent1=hookp1,
1899 1899 parent2=hookp2)
1900 1900 tr = self.transaction('commit')
1901 1901 ret = self.commitctx(cctx, True)
1902 1902 except: # re-raises
1903 1903 if edited:
1904 1904 self.ui.write(
1905 1905 _('note: commit message saved in %s\n') % msgfn)
1906 1906 raise
1907 1907 # update bookmarks, dirstate and mergestate
1908 1908 bookmarks.update(self, [p1, p2], ret)
1909 1909 cctx.markcommitted(ret)
1910 1910 ms.reset()
1911 1911 tr.close()
1912 1912
1913 1913 finally:
1914 1914 lockmod.release(tr, lock, wlock)
1915 1915
1916 1916 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1917 1917 # hack for commands that use a temporary commit (eg: histedit):
1918 1918 # the temporary commit may have been stripped before the hook runs
1919 1919 if self.changelog.hasnode(ret):
1920 1920 self.hook("commit", node=node, parent1=parent1,
1921 1921 parent2=parent2)
1922 1922 self._afterlock(commithook)
1923 1923 return ret
1924 1924
1925 1925 @unfilteredmethod
1926 1926 def commitctx(self, ctx, error=False):
1927 1927 """Add a new revision to current repository.
1928 1928 Revision information is passed via the context argument.
1929 1929 """
1930 1930
1931 1931 tr = None
1932 1932 p1, p2 = ctx.p1(), ctx.p2()
1933 1933 user = ctx.user()
1934 1934
1935 1935 lock = self.lock()
1936 1936 try:
1937 1937 tr = self.transaction("commit")
1938 1938 trp = weakref.proxy(tr)
1939 1939
1940 1940 if ctx.manifestnode():
1941 1941 # reuse an existing manifest revision
1942 1942 mn = ctx.manifestnode()
1943 1943 files = ctx.files()
1944 1944 elif ctx.files():
1945 1945 m1ctx = p1.manifestctx()
1946 1946 m2ctx = p2.manifestctx()
1947 1947 mctx = m1ctx.copy()
1948 1948
1949 1949 m = mctx.read()
1950 1950 m1 = m1ctx.read()
1951 1951 m2 = m2ctx.read()
1952 1952
1953 1953 # check in files
1954 1954 added = []
1955 1955 changed = []
1956 1956 removed = list(ctx.removed())
1957 1957 linkrev = len(self)
1958 1958 self.ui.note(_("committing files:\n"))
1959 1959 for f in sorted(ctx.modified() + ctx.added()):
1960 1960 self.ui.note(f + "\n")
1961 1961 try:
1962 1962 fctx = ctx[f]
1963 1963 if fctx is None:
1964 1964 removed.append(f)
1965 1965 else:
1966 1966 added.append(f)
1967 1967 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1968 1968 trp, changed)
1969 1969 m.setflag(f, fctx.flags())
1970 1970 except OSError as inst:
1971 1971 self.ui.warn(_("trouble committing %s!\n") % f)
1972 1972 raise
1973 1973 except IOError as inst:
1974 1974 errcode = getattr(inst, 'errno', errno.ENOENT)
1975 1975 if error or errcode and errcode != errno.ENOENT:
1976 1976 self.ui.warn(_("trouble committing %s!\n") % f)
1977 1977 raise
1978 1978
1979 1979 # update manifest
1980 1980 self.ui.note(_("committing manifest\n"))
1981 1981 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1982 1982 drop = [f for f in removed if f in m]
1983 1983 for f in drop:
1984 1984 del m[f]
1985 1985 mn = mctx.write(trp, linkrev,
1986 1986 p1.manifestnode(), p2.manifestnode(),
1987 1987 added, drop)
1988 1988 files = changed + removed
1989 1989 else:
1990 1990 mn = p1.manifestnode()
1991 1991 files = []
1992 1992
1993 1993 # update changelog
1994 1994 self.ui.note(_("committing changelog\n"))
1995 1995 self.changelog.delayupdate(tr)
1996 1996 n = self.changelog.add(mn, files, ctx.description(),
1997 1997 trp, p1.node(), p2.node(),
1998 1998 user, ctx.date(), ctx.extra().copy())
1999 1999 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2000 2000 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2001 2001 parent2=xp2)
2002 2002 # set the new commit in its proper phase
2003 2003 targetphase = subrepo.newcommitphase(self.ui, ctx)
2004 2004 if targetphase:
2005 2005 # retract boundary does not alter parent changesets.
2006 2006 # if a parent has a higher phase, the resulting phase will
2007 2007 # be compliant anyway
2008 2008 #
2009 2009 # if minimal phase was 0 we don't need to retract anything
2010 2010 phases.registernew(self, tr, targetphase, [n])
2011 2011 tr.close()
2012 2012 return n
2013 2013 finally:
2014 2014 if tr:
2015 2015 tr.release()
2016 2016 lock.release()
2017 2017
2018 2018 @unfilteredmethod
2019 2019 def destroying(self):
2020 2020 '''Inform the repository that nodes are about to be destroyed.
2021 2021 Intended for use by strip and rollback, so there's a common
2022 2022 place for anything that has to be done before destroying history.
2023 2023
2024 2024 This is mostly useful for saving state that is in memory and waiting
2025 2025 to be flushed when the current lock is released. Because a call to
2026 2026 destroyed is imminent, the repo will be invalidated causing those
2027 2027 changes to stay in memory (waiting for the next unlock), or vanish
2028 2028 completely.
2029 2029 '''
2030 2030 # When using the same lock to commit and strip, the phasecache is left
2031 2031 # dirty after committing. Then when we strip, the repo is invalidated,
2032 2032 # causing those changes to disappear.
2033 2033 if '_phasecache' in vars(self):
2034 2034 self._phasecache.write()
2035 2035
2036 2036 @unfilteredmethod
2037 2037 def destroyed(self):
2038 2038 '''Inform the repository that nodes have been destroyed.
2039 2039 Intended for use by strip and rollback, so there's a common
2040 2040 place for anything that has to be done after destroying history.
2041 2041 '''
2042 2042 # When one tries to:
2043 2043 # 1) destroy nodes thus calling this method (e.g. strip)
2044 2044 # 2) use phasecache somewhere (e.g. commit)
2045 2045 #
2046 2046 # then 2) will fail because the phasecache contains nodes that were
2047 2047 # removed. We can either remove phasecache from the filecache,
2048 2048 # causing it to reload next time it is accessed, or simply filter
2049 2049 # the removed nodes now and write the updated cache.
2050 2050 self._phasecache.filterunknown(self)
2051 2051 self._phasecache.write()
2052 2052
2053 2053 # refresh all repository caches
2054 2054 self.updatecaches()
2055 2055
2056 2056 # Ensure the persistent tag cache is updated. Doing it now
2057 2057 # means that the tag cache only has to worry about destroyed
2058 2058 # heads immediately after a strip/rollback. That in turn
2059 2059 # guarantees that "cachetip == currenttip" (comparing both rev
2060 2060 # and node) always means no nodes have been added or destroyed.
2061 2061
2062 2062 # XXX this is suboptimal when qrefresh'ing: we strip the current
2063 2063 # head, refresh the tag cache, then immediately add a new head.
2064 2064 # But I think doing it this way is necessary for the "instant
2065 2065 # tag cache retrieval" case to work.
2066 2066 self.invalidate()
2067 2067
2068 2068 def walk(self, match, node=None):
2069 2069 '''
2070 2070 walk recursively through the directory tree or a given
2071 2071 changeset, finding all files matched by the match
2072 2072 function
2073 2073 '''
2074 2074 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2075 2075 return self[node].walk(match)
2076 2076
2077 2077 def status(self, node1='.', node2=None, match=None,
2078 2078 ignored=False, clean=False, unknown=False,
2079 2079 listsubrepos=False):
2080 2080 '''a convenience method that calls node1.status(node2)'''
2081 2081 return self[node1].status(node2, match, ignored, clean, unknown,
2082 2082 listsubrepos)
2083 2083
2084 2084 def addpostdsstatus(self, ps):
2085 2085 """Add a callback to run within the wlock, at the point at which status
2086 2086 fixups happen.
2087 2087
2088 2088 On status completion, callback(wctx, status) will be called with the
2089 2089 wlock held, unless the dirstate has changed from underneath or the wlock
2090 2090 couldn't be grabbed.
2091 2091
2092 2092 Callbacks should not capture and use a cached copy of the dirstate --
2093 2093 it might change in the meantime. Instead, they should access the
2094 2094 dirstate via wctx.repo().dirstate.
2095 2095
2096 2096 This list is emptied out after each status run -- extensions should
2097 2097 make sure they add to this list each time dirstate.status is called.
2098 2098 Extensions should also make sure they don't call this for statuses
2099 2099 that don't involve the dirstate.
2100 2100 """
2101 2101
2102 2102 # The list is located here for uniqueness reasons -- it is actually
2103 2103 # managed by the workingctx, but that isn't unique per-repo.
2104 2104 self._postdsstatus.append(ps)
2105 2105
2106 2106 def postdsstatus(self):
2107 2107 """Used by workingctx to get the list of post-dirstate-status hooks."""
2108 2108 return self._postdsstatus
2109 2109
2110 2110 def clearpostdsstatus(self):
2111 2111 """Used by workingctx to clear post-dirstate-status hooks."""
2112 2112 del self._postdsstatus[:]
2113 2113
2114 2114 def heads(self, start=None):
2115 2115 if start is None:
2116 2116 cl = self.changelog
2117 2117 headrevs = reversed(cl.headrevs())
2118 2118 return [cl.node(rev) for rev in headrevs]
2119 2119
2120 2120 heads = self.changelog.heads(start)
2121 2121 # sort the output in rev descending order
2122 2122 return sorted(heads, key=self.changelog.rev, reverse=True)
2123 2123
2124 2124 def branchheads(self, branch=None, start=None, closed=False):
2125 2125 '''return a (possibly filtered) list of heads for the given branch
2126 2126
2127 2127 Heads are returned in topological order, from newest to oldest.
2128 2128 If branch is None, use the dirstate branch.
2129 2129 If start is not None, return only heads reachable from start.
2130 2130 If closed is True, return heads that are marked as closed as well.
2131 2131 '''
2132 2132 if branch is None:
2133 2133 branch = self[None].branch()
2134 2134 branches = self.branchmap()
2135 2135 if branch not in branches:
2136 2136 return []
2137 2137 # the cache returns heads ordered lowest to highest
2138 2138 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2139 2139 if start is not None:
2140 2140 # filter out the heads that cannot be reached from startrev
2141 2141 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2142 2142 bheads = [h for h in bheads if h in fbheads]
2143 2143 return bheads
2144 2144
2145 2145 def branches(self, nodes):
2146 2146 if not nodes:
2147 2147 nodes = [self.changelog.tip()]
2148 2148 b = []
2149 2149 for n in nodes:
2150 2150 t = n
2151 2151 while True:
2152 2152 p = self.changelog.parents(n)
2153 2153 if p[1] != nullid or p[0] == nullid:
2154 2154 b.append((t, n, p[0], p[1]))
2155 2155 break
2156 2156 n = p[0]
2157 2157 return b
2158 2158
2159 2159 def between(self, pairs):
2160 2160 r = []
2161 2161
2162 2162 for top, bottom in pairs:
2163 2163 n, l, i = top, [], 0
2164 2164 f = 1
2165 2165
2166 2166 while n != bottom and n != nullid:
2167 2167 p = self.changelog.parents(n)[0]
2168 2168 if i == f:
2169 2169 l.append(n)
2170 2170 f = f * 2
2171 2171 n = p
2172 2172 i += 1
2173 2173
2174 2174 r.append(l)
2175 2175
2176 2176 return r
2177 2177
2178 2178 def checkpush(self, pushop):
2179 2179 """Extensions can override this function if additional checks have
2180 2180 to be performed before pushing, or call it if they override push
2181 2181 command.
2182 2182 """
2183 2183
2184 2184 @unfilteredpropertycache
2185 2185 def prepushoutgoinghooks(self):
2186 2186 """Return util.hooks consists of a pushop with repo, remote, outgoing
2187 2187 methods, which are called before pushing changesets.
2188 2188 """
2189 2189 return util.hooks()
2190 2190
2191 2191 def pushkey(self, namespace, key, old, new):
2192 2192 try:
2193 2193 tr = self.currenttransaction()
2194 2194 hookargs = {}
2195 2195 if tr is not None:
2196 2196 hookargs.update(tr.hookargs)
2197 2197 hookargs['namespace'] = namespace
2198 2198 hookargs['key'] = key
2199 2199 hookargs['old'] = old
2200 2200 hookargs['new'] = new
2201 2201 self.hook('prepushkey', throw=True, **hookargs)
2202 2202 except error.HookAbort as exc:
2203 2203 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2204 2204 if exc.hint:
2205 2205 self.ui.write_err(_("(%s)\n") % exc.hint)
2206 2206 return False
2207 2207 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2208 2208 ret = pushkey.push(self, namespace, key, old, new)
2209 2209 def runhook():
2210 2210 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2211 2211 ret=ret)
2212 2212 self._afterlock(runhook)
2213 2213 return ret
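# pushkey namespaces in core include 'bookmarks' and 'phases'; e.g. a
# bookmark move pushed over the wire boils down to something like
# pushkey('bookmarks', name, oldhexnode, newhexnode) (an illustration,
# not the full exchange logic).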
2214 2214
2215 2215 def listkeys(self, namespace):
2216 2216 self.hook('prelistkeys', throw=True, namespace=namespace)
2217 2217 self.ui.debug('listing keys for "%s"\n' % namespace)
2218 2218 values = pushkey.list(self, namespace)
2219 2219 self.hook('listkeys', namespace=namespace, values=values)
2220 2220 return values
2221 2221
2222 2222 def debugwireargs(self, one, two, three=None, four=None, five=None):
2223 2223 '''used to test argument passing over the wire'''
2224 2224 return "%s %s %s %s %s" % (one, two, three, four, five)
2225 2225
2226 2226 def savecommitmessage(self, text):
2227 2227 fp = self.vfs('last-message.txt', 'wb')
2228 2228 try:
2229 2229 fp.write(text)
2230 2230 finally:
2231 2231 fp.close()
2232 2232 return self.pathto(fp.name[len(self.root) + 1:])
2233 2233
2234 2234 # used to avoid circular references so destructors work
2235 2235 def aftertrans(files):
2236 2236 renamefiles = [tuple(t) for t in files]
2237 2237 def a():
2238 2238 for vfs, src, dest in renamefiles:
2239 2239 # if src and dest refer to a same file, vfs.rename is a no-op,
2240 2240 # leaving both src and dest on disk. delete dest to make sure
2241 2241 # the rename couldn't be such a no-op.
2242 2242 vfs.tryunlink(dest)
2243 2243 try:
2244 2244 vfs.rename(src, dest)
2245 2245 except OSError: # journal file does not yet exist
2246 2246 pass
2247 2247 return a
2248 2248
2249 2249 def undoname(fn):
2250 2250 base, name = os.path.split(fn)
2251 2251 assert name.startswith('journal')
2252 2252 return os.path.join(base, name.replace('journal', 'undo', 1))
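# e.g. undoname('journal.branch') -> 'undo.branch'; the assertion above
# guarantees the 'journal' prefix is present.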
2253 2253
2254 2254 def instance(ui, path, create):
2255 2255 return localrepository(ui, util.urllocalpath(path), create)
2256 2256
2257 2257 def islocal(path):
2258 2258 return True
2259 2259
2260 2260 def newreporequirements(repo):
2261 2261 """Determine the set of requirements for a new local repository.
2262 2262
2263 2263 Extensions can wrap this function to specify custom requirements for
2264 2264 new repositories.
2265 2265 """
2266 2266 ui = repo.ui
2267 2267 requirements = {'revlogv1'}
2268 2268 if ui.configbool('format', 'usestore'):
2269 2269 requirements.add('store')
2270 2270 if ui.configbool('format', 'usefncache'):
2271 2271 requirements.add('fncache')
2272 2272 if ui.configbool('format', 'dotencode'):
2273 2273 requirements.add('dotencode')
2274 2274
2275 2275 compengine = ui.config('experimental', 'format.compression')
2276 2276 if compengine not in util.compengines:
2277 2277 raise error.Abort(_('compression engine %s defined by '
2278 2278 'experimental.format.compression not available') %
2279 2279 compengine,
2280 2280 hint=_('run "hg debuginstall" to list available '
2281 2281 'compression engines'))
2282 2282
2283 2283 # zlib is the historical default and doesn't need an explicit requirement.
2284 2284 if compengine != 'zlib':
2285 2285 requirements.add('exp-compression-%s' % compengine)
2286 2286
2287 2287 if scmutil.gdinitconfig(ui):
2288 2288 requirements.add('generaldelta')
2289 2289 if ui.configbool('experimental', 'treemanifest'):
2290 2290 requirements.add('treemanifest')
2291 2291 if ui.configbool('experimental', 'manifestv2'):
2292 2292 requirements.add('manifestv2')
2293 2293
2294 2294 revlogv2 = ui.config('experimental', 'revlogv2')
2295 2295 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2296 2296 requirements.remove('revlogv1')
2297 2297 # generaldelta is implied by revlogv2.
2298 2298 requirements.discard('generaldelta')
2299 2299 requirements.add(REVLOGV2_REQUIREMENT)
2300 2300
2301 2301 return requirements
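# Sketch of an extension wrapping this to add its own requirement (the
# extension name, config knob and requirement string are hypothetical):
#
#   from mercurial import extensions, localrepo
#
#   def _wrapreqs(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enabled'):
#           reqs.add('exp-myext-feature')
#       return reqs
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements', _wrapreqs)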