lock: use configint for 'ui.timeout' config...
Boris Feld
r35208:d210723b default
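
The patch itself is a one-line change in journal.py's jlock(): the manual
int(self.ui.config("ui", "timeout")) conversion is replaced by
self.ui.configint("ui", "timeout"), which does the string-to-integer
conversion inside the config layer and reports a proper configuration error
on a malformed value instead of a bare ValueError/TypeError at the call
site. Below is a minimal sketch of that pattern; miniui and its stored
values are invented stand-ins for illustration, not Mercurial's actual ui
class.

    # Toy stand-in illustrating why a typed accessor like configint() is
    # preferable to int(config(...)). Not Mercurial's real ui API.
    class miniui(object):
        def __init__(self, values):
            self._values = values  # {(section, name): raw string}

        def config(self, section, name, default=None):
            # Returns the raw configured value (usually a string).
            return self._values.get((section, name), default)

        def configint(self, section, name, default=None):
            # Converts to int, naming the offending section/key on bad input.
            v = self.config(section, name, default)
            if v is None or isinstance(v, int):
                return v
            try:
                return int(v)
            except ValueError:
                raise ValueError("config error: %s.%s is not an integer (%r)"
                                 % (section, name, v))

    ui = miniui({('ui', 'timeout'): '600'})
    assert ui.configint('ui', 'timeout') == 600
    assert ui.configint('ui', 'unset', 0) == 0

With such a helper, a bad setting like ui.timeout=10m fails with an explicit
message naming the section and key, which mirrors the motivation for using
configint here.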
@@ -1,516 +1,516 @@
# journal.py
#
# Copyright 2014-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""track previous positions of bookmarks (EXPERIMENTAL)

This extension adds a new command: `hg journal`, which shows you where
bookmarks were previously located.

"""

from __future__ import absolute_import

import collections
import errno
import os
import weakref

from mercurial.i18n import _

from mercurial import (
    bookmarks,
    cmdutil,
    dispatch,
    error,
    extensions,
    hg,
    localrepo,
    lock,
    node,
    pycompat,
    registrar,
    util,
)

from . import share

cmdtable = {}
command = registrar.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# storage format version; increment when the format changes
storageversion = 0

# namespaces
bookmarktype = 'bookmark'
wdirparenttype = 'wdirparent'
# In a shared repository, what shared feature name is used
# to indicate this namespace is shared with the source?
sharednamespaces = {
    bookmarktype: hg.sharedbookmarks,
}

# Journal recording, register hooks and storage object
def extsetup(ui):
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)

def reposetup(ui, repo):
    if repo.local():
        repo.journal = journalstorage(repo)
        repo._wlockfreeprefix.add('namejournal')

        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
        if cached:
            # an already instantiated dirstate isn't yet marked as
            # "journal"-ing, even though repo.dirstate() was already
            # wrapped by our own wrapdirstate()
            _setupdirstate(repo, dirstate)

def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)

def _setupdirstate(repo, dirstate):
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)

# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, dirstate)
    return dirstate

def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge
        oldhashes = old[:1] if old[1] == node.nullid else old
        newhashes = new[:1] if new[1] == node.nullid else new
        dirstate.journalstorage.record(
            wdirparenttype, '.', oldhashes, newhashes)

# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)

# shared repository support
def _readsharedfeatures(repo):
    """A set of shared features for this repository"""
    try:
        return set(repo.vfs.read('shared').splitlines())
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return set()

def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.
    """
    order = kwargs.pop(r'order', max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        value, key, it = order(iterable_map.itervalues())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]

def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')

def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared data over from source to destination but
            # move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)

class journalentry(collections.namedtuple(
        u'journalentry',
        u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))

class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that are added when the regular wlock is unlocked
    (e.g. the dirstate).

    """
    _currentcommand = ()
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = share._getsrcrepo(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'namejournal.lock',
-               int(self.ui.config("ui", "timeout")), desc=desc)
+               self.ui.configint("ui", "timeout"), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        with self.jlock(vfs):
            version = None
            # open file in append mode to ensure it is created if missing
            with vfs('namejournal', mode='a+b') as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='namejournal', _newestfirst=True):
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)

# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations of the working
    copy are shown. Passing a bookmark name will show all the previous
    positions of that bookmark. Use the --all switch to show previous
    locations for all bookmarks and the working copy; each line will then
    include the bookmark name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    opts = pycompat.byteskwargs(opts)
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
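
As an aside for readers of the journalentry docstring above: the storage
format it describes (newline-separated fields inside a record, NUL-separated
records, and a NUL-terminated version header) can be exercised without
Mercurial. The sketch below uses invented field values for illustration;
real entries carry full 40-character hex node hashes.

    # Build one record in the documented layout, then parse it back.
    record = '\n'.join([
        '1510134245.0 -3600',   # timestamp and timezone, space-separated
        'alice',                # user
        'hg up default',        # command
        'bookmark',             # namespace
        'feature-x',            # name (here, a bookmark name)
        'd5e6f7a8',             # old hashes, hex, comma-separated (shortened)
        'a1b2c3d4',             # new hashes, hex, comma-separated (shortened)
    ])
    data = '0' + '\0' + record + '\0'   # version 0 header, then one record

    version, _sep, body = data.partition('\0')
    assert version == '0'
    entries = [rec.split('\n') for rec in body.split('\0') if rec]
    assert entries[0][4] == 'feature-x'
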
@@ -1,2301 +1,2301
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 discovery,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repository,
53 repository,
54 repoview,
54 repoview,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 sparse,
58 sparse,
59 store,
59 store,
60 subrepo,
60 subrepo,
61 tags as tagsmod,
61 tags as tagsmod,
62 transaction,
62 transaction,
63 txnutil,
63 txnutil,
64 util,
64 util,
65 vfs as vfsmod,
65 vfs as vfsmod,
66 )
66 )
67
67
68 release = lockmod.release
68 release = lockmod.release
69 urlerr = util.urlerr
69 urlerr = util.urlerr
70 urlreq = util.urlreq
70 urlreq = util.urlreq
71
71
72 # set of (path, vfs-location) tuples. vfs-location is:
72 # set of (path, vfs-location) tuples. vfs-location is:
73 # - 'plain for vfs relative paths
73 # - 'plain for vfs relative paths
74 # - '' for svfs relative paths
74 # - '' for svfs relative paths
75 _cachedfiles = set()
75 _cachedfiles = set()
76
76
77 class _basefilecache(scmutil.filecache):
77 class _basefilecache(scmutil.filecache):
78 """All filecache usage on repo are done for logic that should be unfiltered
78 """All filecache usage on repo are done for logic that should be unfiltered
79 """
79 """
80 def __get__(self, repo, type=None):
80 def __get__(self, repo, type=None):
81 if repo is None:
81 if repo is None:
82 return self
82 return self
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 def __set__(self, repo, value):
84 def __set__(self, repo, value):
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 def __delete__(self, repo):
86 def __delete__(self, repo):
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88
88
89 class repofilecache(_basefilecache):
89 class repofilecache(_basefilecache):
90 """filecache for files in .hg but outside of .hg/store"""
90 """filecache for files in .hg but outside of .hg/store"""
91 def __init__(self, *paths):
91 def __init__(self, *paths):
92 super(repofilecache, self).__init__(*paths)
92 super(repofilecache, self).__init__(*paths)
93 for path in paths:
93 for path in paths:
94 _cachedfiles.add((path, 'plain'))
94 _cachedfiles.add((path, 'plain'))
95
95
96 def join(self, obj, fname):
96 def join(self, obj, fname):
97 return obj.vfs.join(fname)
97 return obj.vfs.join(fname)
98
98
99 class storecache(_basefilecache):
99 class storecache(_basefilecache):
100 """filecache for files in the store"""
100 """filecache for files in the store"""
101 def __init__(self, *paths):
101 def __init__(self, *paths):
102 super(storecache, self).__init__(*paths)
102 super(storecache, self).__init__(*paths)
103 for path in paths:
103 for path in paths:
104 _cachedfiles.add((path, ''))
104 _cachedfiles.add((path, ''))
105
105
106 def join(self, obj, fname):
106 def join(self, obj, fname):
107 return obj.sjoin(fname)
107 return obj.sjoin(fname)
108
108
109 def isfilecached(repo, name):
109 def isfilecached(repo, name):
110 """check if a repo has already cached "name" filecache-ed property
110 """check if a repo has already cached "name" filecache-ed property
111
111
112 This returns (cachedobj-or-None, iscached) tuple.
112 This returns (cachedobj-or-None, iscached) tuple.
113 """
113 """
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 if not cacheentry:
115 if not cacheentry:
116 return None, False
116 return None, False
117 return cacheentry.obj, True
117 return cacheentry.obj, True
118
118
119 class unfilteredpropertycache(util.propertycache):
119 class unfilteredpropertycache(util.propertycache):
120 """propertycache that apply to unfiltered repo only"""
120 """propertycache that apply to unfiltered repo only"""
121
121
122 def __get__(self, repo, type=None):
122 def __get__(self, repo, type=None):
123 unfi = repo.unfiltered()
123 unfi = repo.unfiltered()
124 if unfi is repo:
124 if unfi is repo:
125 return super(unfilteredpropertycache, self).__get__(unfi)
125 return super(unfilteredpropertycache, self).__get__(unfi)
126 return getattr(unfi, self.name)
126 return getattr(unfi, self.name)
127
127
128 class filteredpropertycache(util.propertycache):
128 class filteredpropertycache(util.propertycache):
129 """propertycache that must take filtering in account"""
129 """propertycache that must take filtering in account"""
130
130
131 def cachevalue(self, obj, value):
131 def cachevalue(self, obj, value):
132 object.__setattr__(obj, self.name, value)
132 object.__setattr__(obj, self.name, value)
133
133
134
134
135 def hasunfilteredcache(repo, name):
135 def hasunfilteredcache(repo, name):
136 """check if a repo has an unfilteredpropertycache value for <name>"""
136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 return name in vars(repo.unfiltered())
137 return name in vars(repo.unfiltered())
138
138
139 def unfilteredmethod(orig):
139 def unfilteredmethod(orig):
140 """decorate method that always need to be run on unfiltered version"""
140 """decorate method that always need to be run on unfiltered version"""
141 def wrapper(repo, *args, **kwargs):
141 def wrapper(repo, *args, **kwargs):
142 return orig(repo.unfiltered(), *args, **kwargs)
142 return orig(repo.unfiltered(), *args, **kwargs)
143 return wrapper
143 return wrapper
144
144
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 'unbundle'}
146 'unbundle'}
147 legacycaps = moderncaps.union({'changegroupsubset'})
147 legacycaps = moderncaps.union({'changegroupsubset'})
148
148
149 class localpeer(repository.peer):
149 class localpeer(repository.peer):
150 '''peer for a local repo; reflects only the most recent API'''
150 '''peer for a local repo; reflects only the most recent API'''
151
151
152 def __init__(self, repo, caps=None):
152 def __init__(self, repo, caps=None):
153 super(localpeer, self).__init__()
153 super(localpeer, self).__init__()
154
154
155 if caps is None:
155 if caps is None:
156 caps = moderncaps.copy()
156 caps = moderncaps.copy()
157 self._repo = repo.filtered('served')
157 self._repo = repo.filtered('served')
158 self._ui = repo.ui
158 self._ui = repo.ui
159 self._caps = repo._restrictcapabilities(caps)
159 self._caps = repo._restrictcapabilities(caps)
160
160
161 # Begin of _basepeer interface.
161 # Begin of _basepeer interface.
162
162
163 @util.propertycache
163 @util.propertycache
164 def ui(self):
164 def ui(self):
165 return self._ui
165 return self._ui
166
166
167 def url(self):
167 def url(self):
168 return self._repo.url()
168 return self._repo.url()
169
169
170 def local(self):
170 def local(self):
171 return self._repo
171 return self._repo
172
172
173 def peer(self):
173 def peer(self):
174 return self
174 return self
175
175
176 def canpush(self):
176 def canpush(self):
177 return True
177 return True
178
178
179 def close(self):
179 def close(self):
180 self._repo.close()
180 self._repo.close()
181
181
182 # End of _basepeer interface.
182 # End of _basepeer interface.
183
183
184 # Begin of _basewirecommands interface.
184 # Begin of _basewirecommands interface.
185
185
186 def branchmap(self):
186 def branchmap(self):
187 return self._repo.branchmap()
187 return self._repo.branchmap()
188
188
189 def capabilities(self):
189 def capabilities(self):
190 return self._caps
190 return self._caps
191
191
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 """Used to test argument passing over the wire"""
193 """Used to test argument passing over the wire"""
194 return "%s %s %s %s %s" % (one, two, three, four, five)
194 return "%s %s %s %s %s" % (one, two, three, four, five)
195
195
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 **kwargs):
197 **kwargs):
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 common=common, bundlecaps=bundlecaps,
199 common=common, bundlecaps=bundlecaps,
200 **kwargs)
200 **kwargs)
201 cb = util.chunkbuffer(chunks)
201 cb = util.chunkbuffer(chunks)
202
202
203 if exchange.bundle2requested(bundlecaps):
203 if exchange.bundle2requested(bundlecaps):
204 # When requesting a bundle2, getbundle returns a stream to make the
204 # When requesting a bundle2, getbundle returns a stream to make the
205 # wire level function happier. We need to build a proper object
205 # wire level function happier. We need to build a proper object
206 # from it in local peer.
206 # from it in local peer.
207 return bundle2.getunbundler(self.ui, cb)
207 return bundle2.getunbundler(self.ui, cb)
208 else:
208 else:
209 return changegroup.getunbundler('01', cb, None)
209 return changegroup.getunbundler('01', cb, None)
210
210
211 def heads(self):
211 def heads(self):
212 return self._repo.heads()
212 return self._repo.heads()
213
213
214 def known(self, nodes):
214 def known(self, nodes):
215 return self._repo.known(nodes)
215 return self._repo.known(nodes)
216
216
217 def listkeys(self, namespace):
217 def listkeys(self, namespace):
218 return self._repo.listkeys(namespace)
218 return self._repo.listkeys(namespace)
219
219
220 def lookup(self, key):
220 def lookup(self, key):
221 return self._repo.lookup(key)
221 return self._repo.lookup(key)
222
222
223 def pushkey(self, namespace, key, old, new):
223 def pushkey(self, namespace, key, old, new):
224 return self._repo.pushkey(namespace, key, old, new)
224 return self._repo.pushkey(namespace, key, old, new)
225
225
226 def stream_out(self):
226 def stream_out(self):
227 raise error.Abort(_('cannot perform stream clone against local '
227 raise error.Abort(_('cannot perform stream clone against local '
228 'peer'))
228 'peer'))
229
229
230 def unbundle(self, cg, heads, url):
230 def unbundle(self, cg, heads, url):
231 """apply a bundle on a repo
231 """apply a bundle on a repo
232
232
233 This function handles the repo locking itself."""
233 This function handles the repo locking itself."""
234 try:
234 try:
235 try:
235 try:
236 cg = exchange.readbundle(self.ui, cg, None)
236 cg = exchange.readbundle(self.ui, cg, None)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 if util.safehasattr(ret, 'getchunks'):
238 if util.safehasattr(ret, 'getchunks'):
239 # This is a bundle20 object, turn it into an unbundler.
239 # This is a bundle20 object, turn it into an unbundler.
240 # This little dance should be dropped eventually when the
240 # This little dance should be dropped eventually when the
241 # API is finally improved.
241 # API is finally improved.
242 stream = util.chunkbuffer(ret.getchunks())
242 stream = util.chunkbuffer(ret.getchunks())
243 ret = bundle2.getunbundler(self.ui, stream)
243 ret = bundle2.getunbundler(self.ui, stream)
244 return ret
244 return ret
245 except Exception as exc:
245 except Exception as exc:
246 # If the exception contains output salvaged from a bundle2
246 # If the exception contains output salvaged from a bundle2
247 # reply, we need to make sure it is printed before continuing
247 # reply, we need to make sure it is printed before continuing
248 # to fail. So we build a bundle2 with such output and consume
248 # to fail. So we build a bundle2 with such output and consume
249 # it directly.
249 # it directly.
250 #
250 #
251 # This is not very elegant but allows a "simple" solution for
251 # This is not very elegant but allows a "simple" solution for
252 # issue4594
252 # issue4594
253 output = getattr(exc, '_bundle2salvagedoutput', ())
253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 if output:
254 if output:
255 bundler = bundle2.bundle20(self._repo.ui)
255 bundler = bundle2.bundle20(self._repo.ui)
256 for out in output:
256 for out in output:
257 bundler.addpart(out)
257 bundler.addpart(out)
258 stream = util.chunkbuffer(bundler.getchunks())
258 stream = util.chunkbuffer(bundler.getchunks())
259 b = bundle2.getunbundler(self.ui, stream)
259 b = bundle2.getunbundler(self.ui, stream)
260 bundle2.processbundle(self._repo, b)
260 bundle2.processbundle(self._repo, b)
261 raise
261 raise
262 except error.PushRaced as exc:
262 except error.PushRaced as exc:
263 raise error.ResponseError(_('push failed:'), str(exc))
263 raise error.ResponseError(_('push failed:'), str(exc))
264
264
265 # End of _basewirecommands interface.
265 # End of _basewirecommands interface.
266
266
267 # Begin of peer interface.
267 # Begin of peer interface.
268
268
269 def iterbatch(self):
269 def iterbatch(self):
270 return peer.localiterbatcher(self)
270 return peer.localiterbatcher(self)
271
271
272 # End of peer interface.
272 # End of peer interface.
273
273
274 class locallegacypeer(repository.legacypeer, localpeer):
274 class locallegacypeer(repository.legacypeer, localpeer):
275 '''peer extension which implements legacy methods too; used for tests with
275 '''peer extension which implements legacy methods too; used for tests with
276 restricted capabilities'''
276 restricted capabilities'''
277
277
278 def __init__(self, repo):
278 def __init__(self, repo):
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280
280
281 # Begin of baselegacywirecommands interface.
281 # Begin of baselegacywirecommands interface.
282
282
283 def between(self, pairs):
283 def between(self, pairs):
284 return self._repo.between(pairs)
284 return self._repo.between(pairs)
285
285
286 def branches(self, nodes):
286 def branches(self, nodes):
287 return self._repo.branches(nodes)
287 return self._repo.branches(nodes)
288
288
289 def changegroup(self, basenodes, source):
289 def changegroup(self, basenodes, source):
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 missingheads=self._repo.heads())
291 missingheads=self._repo.heads())
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293
293
294 def changegroupsubset(self, bases, heads, source):
294 def changegroupsubset(self, bases, heads, source):
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 missingheads=heads)
296 missingheads=heads)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298
298
299 # End of baselegacywirecommands interface.
299 # End of baselegacywirecommands interface.
300
300
301 # Increment the sub-version when the revlog v2 format changes to lock out old
301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 # clients.
302 # clients.
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304
304
305 class localrepository(object):
305 class localrepository(object):
306
306
307 supportedformats = {
307 supportedformats = {
308 'revlogv1',
308 'revlogv1',
309 'generaldelta',
309 'generaldelta',
310 'treemanifest',
310 'treemanifest',
311 'manifestv2',
311 'manifestv2',
312 REVLOGV2_REQUIREMENT,
312 REVLOGV2_REQUIREMENT,
313 }
313 }
314 _basesupported = supportedformats | {
314 _basesupported = supportedformats | {
315 'store',
315 'store',
316 'fncache',
316 'fncache',
317 'shared',
317 'shared',
318 'relshared',
318 'relshared',
319 'dotencode',
319 'dotencode',
320 'exp-sparse',
320 'exp-sparse',
321 }
321 }
322 openerreqs = {
322 openerreqs = {
323 'revlogv1',
323 'revlogv1',
324 'generaldelta',
324 'generaldelta',
325 'treemanifest',
325 'treemanifest',
326 'manifestv2',
326 'manifestv2',
327 }
327 }
328
328
329 # a list of (ui, featureset) functions.
329 # a list of (ui, featureset) functions.
330 # only functions defined in module of enabled extensions are invoked
330 # only functions defined in module of enabled extensions are invoked
331 featuresetupfuncs = set()
331 featuresetupfuncs = set()
332
332
333 # list of prefix for file which can be written without 'wlock'
333 # list of prefix for file which can be written without 'wlock'
334 # Extensions should extend this list when needed
334 # Extensions should extend this list when needed
335 _wlockfreeprefix = {
335 _wlockfreeprefix = {
336 # We migh consider requiring 'wlock' for the next
336 # We migh consider requiring 'wlock' for the next
337 # two, but pretty much all the existing code assume
337 # two, but pretty much all the existing code assume
338 # wlock is not needed so we keep them excluded for
338 # wlock is not needed so we keep them excluded for
339 # now.
339 # now.
340 'hgrc',
340 'hgrc',
341 'requires',
341 'requires',
342 # XXX cache is a complicatged business someone
342 # XXX cache is a complicatged business someone
343 # should investigate this in depth at some point
343 # should investigate this in depth at some point
344 'cache/',
344 'cache/',
345 # XXX shouldn't be dirstate covered by the wlock?
345 # XXX shouldn't be dirstate covered by the wlock?
346 'dirstate',
346 'dirstate',
347 # XXX bisect was still a bit too messy at the time
347 # XXX bisect was still a bit too messy at the time
348 # this changeset was introduced. Someone should fix
348 # this changeset was introduced. Someone should fix
349 # the remainig bit and drop this line
349 # the remainig bit and drop this line
350 'bisect.state',
350 'bisect.state',
351 }
351 }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
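
    # Example (a sketch of existing behavior, not new functionality): the
    # ward above is only installed when one of the devel options read in
    # __init__ is set, e.g. in an hgrc:
    #
    #   [devel]
    #   check-locks = yes
    #
    # After that, any write through self.vfs without the appropriate lock
    # emits a develwarn of the form: write with no wlock: "<path>"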

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
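
    # Example (sketch) of an hgrc tuning the opener options read above;
    # the option names come straight from the configint/configbool/
    # configbytes calls in _applyopenerreqs:
    #
    #   [format]
    #   chunkcachesize = 65536
    #   maxchainlen = 1000
    #
    #   [experimental]
    #   sparse-read = yes
    #   sparse-read.min-gap-size = 64KB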

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)
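
    # Example (sketch): callers obtain a view hiding filtered changesets,
    # e.g. the 'visible' filter that cancopy() below also uses:
    #
    #   visible = repo.filtered('visible')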

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
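
    # Examples (sketch): repo[None] yields a workingctx; repo['tip'] a
    # changectx; repo[0:2] a list of changectx, skipping filtered revs.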

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
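
    # Example (sketch) of the %-formatting mentioned above, where %d
    # escapes an integer revision and %s a string; 'somerev' is a
    # made-up variable:
    #
    #   for rev in repo.revs('ancestors(%d) and file(%s)', somerev, 'README'):
    #       ...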

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
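
    # Example (sketch): expand user aliases but force a local definition
    # of a (hypothetical) 'mine' alias for the duration of the call:
    #
    #   revs = repo.anyrevs(['mine()'], user=True,
    #                       localalias={'mine': 'author(alice)'})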

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
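
    # Example (hypothetical sketch): an extension firing its own hook,
    # where 'myext-rewritten' and 'newnode' are made-up names:
    #
    #   repo.hook('myext-rewritten', throw=False, node=newnode)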

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
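
    # Example (sketch): the heads of the default branch, ordered by
    # increasing revision number as documented above:
    #
    #   defaultheads = repo.branchmap()['default']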

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
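
    # Example (sketch) of the hgrc sections this reads; the [encode] line
    # is the classic gunzip example from the hgrc documentation, piping
    # matching files through a shell command on checkin:
    #
    #   [encode]
    #   *.gz = pipe: gunzip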

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
1145
1145
1146 def transaction(self, desc, report=None):
1146 def transaction(self, desc, report=None):
1147 if (self.ui.configbool('devel', 'all-warnings')
1147 if (self.ui.configbool('devel', 'all-warnings')
1148 or self.ui.configbool('devel', 'check-locks')):
1148 or self.ui.configbool('devel', 'check-locks')):
1149 if self._currentlock(self._lockref) is None:
1149 if self._currentlock(self._lockref) is None:
1150 raise error.ProgrammingError('transaction requires locking')
1150 raise error.ProgrammingError('transaction requires locking')
1151 tr = self.currenttransaction()
1151 tr = self.currenttransaction()
1152 if tr is not None:
1152 if tr is not None:
1153 scmutil.registersummarycallback(self, tr, desc)
1153 scmutil.registersummarycallback(self, tr, desc)
1154 return tr.nest()
1154 return tr.nest()
1155
1155
1156 # abort here if the journal already exists
1156 # abort here if the journal already exists
1157 if self.svfs.exists("journal"):
1157 if self.svfs.exists("journal"):
1158 raise error.RepoError(
1158 raise error.RepoError(
1159 _("abandoned transaction found"),
1159 _("abandoned transaction found"),
1160 hint=_("run 'hg recover' to clean up transaction"))
1160 hint=_("run 'hg recover' to clean up transaction"))
1161
1161
1162 idbase = "%.40f#%f" % (random.random(), time.time())
1162 idbase = "%.40f#%f" % (random.random(), time.time())
1163 ha = hex(hashlib.sha1(idbase).digest())
1163 ha = hex(hashlib.sha1(idbase).digest())
1164 txnid = 'TXN:' + ha
1164 txnid = 'TXN:' + ha
1165 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1165 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1166
1166
1167 self._writejournal(desc)
1167 self._writejournal(desc)
1168 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1168 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1169 if report:
1169 if report:
1170 rp = report
1170 rp = report
1171 else:
1171 else:
1172 rp = self.ui.warn
1172 rp = self.ui.warn
1173 vfsmap = {'plain': self.vfs} # root of .hg/
1173 vfsmap = {'plain': self.vfs} # root of .hg/
1174 # we must avoid cyclic reference between repo and transaction.
1174 # we must avoid cyclic reference between repo and transaction.
1175 reporef = weakref.ref(self)
1175 reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
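        #
        # For example, a transaction that moved tag "v1.2" and added tag
        # "v1.3" could leave a tags.changes file like this (node hashes are
        # purely illustrative):
        #
        #   -M 0123456789abcdef0123456789abcdef01234567 v1.2
        #   +M 89abcdef0123456789abcdef0123456789abcdef v1.2
        #   +A fedcba9876543210fedcba9876543210fedcba98 v1.3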
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
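        # A user would opt in through configuration, for instance:
        #
        #   [experimental]
        #   hook-track-tags = yes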
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
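        # As a sketch, user hooks reacting to these events could be wired up
        # in an hgrc like the following (the python hook targets are
        # hypothetical):
        #
        #   [hooks]
        #   pretxnclose.check = python:myhooks.checktxn
        #   pretxnclose-bookmark.audit = python:myhooks.auditbookmark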
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

            repo.invalidate(clearfilecache=True)
        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
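    # The journal/undo pairing is mechanical: on transaction close, each
    # 'journal.*' file listed above is renamed by undoname() to its 'undo.*'
    # counterpart (e.g. 'journal.dirstate' becomes 'undo.dirstate'), which is
    # what rollback() later consumes.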

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
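    # Illustrative command-line counterpart: 'hg rollback --dry-run' reports
    # what would be undone without changing anything, and 'hg rollback -f'
    # skips the "may lose data" safety check performed in _rollback() below.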

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
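    # A sketch of how an extension might augment this (the wrapper and the
    # extra cache-warming helper are hypothetical):
    #
    #   def wrapupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def extendedupdater(tr):
    #           updater(tr)
    #           warmmycache(repo, tr)  # hypothetical extension cache
    #       return extendedupdater
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', wrapupdater)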

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, that
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a later call to
            # `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
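            # the wait is user-configurable through, for example:
            #
            #   [ui]
            #   timeout = 600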
            l = lockmod.lock(vfs, lockname,
                             self.ui.configint("ui", "timeout"),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
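        # For example, a caller honoring that ordering would do (illustrative):
        #
        #   with repo.wlock():
        #       with repo.lock():
        #           with repo.transaction('example'):
        #               ...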
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock; they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
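            # In filelog terms the copy source is recorded as revision
            # metadata; conceptually (values illustrative):
            #
            #   meta = {"copy": "foo", "copyrev": "<40-hex-digit node>"}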

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepo.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
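    # Minimal illustrative use from extension code (assumes 'repo' has
    # uncommitted changes; the message and user values are placeholders):
    #
    #   node = repo.commit(text='example message', user='alice')
    #   if node is None:
    #       repo.ui.warn('nothing changed\n')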

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase is 0, we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

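    # The block above shows the canonical write pattern: take the repo lock,
    # open a transaction, do the writes, tr.close() on success, and release
    # both in "finally". A minimal sketch of the same pattern for extension
    # code (hypothetical transaction name; assumes only the lock() and
    # transaction() APIs used above):
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('myext-write')
    #         try:
    #             # ... mutate repository state here ...
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         lock.release()
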
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)
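
    # As the deprecation message says, callers should walk a context
    # directly. A minimal sketch (assumes an existing repo object and the
    # match.always helper):
    #
    #     from mercurial import match as matchmod
    #     m = matchmod.always(repo.root, '')
    #     for f in repo['.'].walk(m):
    #         repo.ui.write(f + '\n')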

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
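
    # Convenience usage (a sketch; assumes an existing repo object): compare
    # the working directory against its first parent and list modified files.
    #
    #     st = repo.status()
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)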

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
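
    # Example callback registration (hypothetical extension code, a sketch
    # only; the callback signature follows the docstring above):
    #
    #     def fixup(wctx, status):
    #         # runs under wlock right after status fixups
    #         wctx.repo().ui.note('%d modified files\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(fixup)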

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
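
    # Usage sketch (assumes an existing repo object): print the open heads of
    # the current branch, newest first, as short hashes.
    #
    #     from mercurial import node as nodemod
    #     for h in repo.branchheads():
    #         repo.ui.write(nodemod.short(h) + '\n')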

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
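
    # between() answers the legacy wire protocol's "between" query: for each
    # (top, bottom) pair it walks first parents from top toward bottom and
    # records the nodes at distances 1, 2, 4, 8, ... so the reply stays
    # logarithmic in the chain length. A standalone sketch of the same
    # spacing over plain integers (illustration only, not repository code):
    #
    #     def sample(top, bottom):
    #         picked, i, f, n = [], 0, 1, top
    #         while n != bottom:
    #             if i == f:
    #                 picked.append(n)
    #                 f *= 2
    #             n -= 1  # stand-in for following the first parent
    #             i += 1
    #         return picked
    #
    #     sample(9, 0)  # -> [8, 7, 5, 1]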

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered functions are called
        before pushing changesets, each receiving a pushop with repo, remote,
        and outgoing attributes.
        """
        return util.hooks()
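
    # Registration sketch (hypothetical extension code; uses the
    # util.hooks.add(source, hook) interface):
    #
    #     def checkoutgoing(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             raise error.Abort(_('refusing to push more than 100 csets'))
    #
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)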

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
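
    # Usage sketch (assumes an existing repo object): namespaces such as
    # 'phases' and 'bookmarks' are registered with the pushkey protocol, so
    # moving a bookmark can be expressed as a pushkey call; old/new values
    # are hex node strings, with '' for a previously absent bookmark.
    #
    #     from mercurial.node import hex
    #     repo.pushkey('bookmarks', 'mybook', '', hex(repo['tip'].node()))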

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
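
# Wrapping sketch (hypothetical extension code, following the
# extensions.wrapfunction pattern used by bundled extensions): add a custom
# requirement to newly created repositories.
#
#     from mercurial import extensions, localrepo
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _wrapreqs)
#
#     def _wrapreqs(orig, repo):
#         reqs = orig(repo)
#         if repo.ui.configbool('myext', 'enable'):
#             reqs.add('exp-myext-feature')
#         return reqs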