localrepo: delete localrepo.manifest...
Durham Goode
r30376:f84fc6a9 default
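For context: this commit deletes the localrepo.manifest property, which had become a thin
compatibility shim returning self.manifestlog._oldmanifest; callers are expected to go
through manifestlog instead. A minimal caller-side migration sketch (illustrative only,
not part of this commit; it assumes the manifestlog[node] accessor returning a manifestctx
with a read() method, introduced in the same series):

    # Hypothetical helper, for illustration; these names are not from this diff.
    def readmanifest(repo, mfnode):
        # repo.manifestlog[mfnode] is assumed to return a manifestctx whose
        # read() yields the manifest (a mapping of file name to file node).
        return repo.manifestlog[mfnode].read()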
@@ -1,2004 +1,2000 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

-    @property
-    def manifest(self):
-        return self.manifestlog._oldmanifest
-
    def _constructmanifest(self):
512 # This is a temporary function while we migrate from manifest to
508 # This is a temporary function while we migrate from manifest to
513 # manifestlog. It allows bundlerepo and unionrepo to intercept the
509 # manifestlog. It allows bundlerepo and unionrepo to intercept the
514 # manifest creation.
510 # manifest creation.
515 return manifest.manifest(self.svfs)
511 return manifest.manifest(self.svfs)
516
512
517 @storecache('00manifest.i')
513 @storecache('00manifest.i')
518 def manifestlog(self):
514 def manifestlog(self):
519 return manifest.manifestlog(self.svfs, self)
515 return manifest.manifestlog(self.svfs, self)
520
516
521 @repofilecache('dirstate')
517 @repofilecache('dirstate')
522 def dirstate(self):
518 def dirstate(self):
523 return dirstate.dirstate(self.vfs, self.ui, self.root,
519 return dirstate.dirstate(self.vfs, self.ui, self.root,
524 self._dirstatevalidate)
520 self._dirstatevalidate)
525
521
526 def _dirstatevalidate(self, node):
522 def _dirstatevalidate(self, node):
527 try:
523 try:
528 self.changelog.rev(node)
524 self.changelog.rev(node)
529 return node
525 return node
530 except error.LookupError:
526 except error.LookupError:
531 if not self._dirstatevalidatewarned:
527 if not self._dirstatevalidatewarned:
532 self._dirstatevalidatewarned = True
528 self._dirstatevalidatewarned = True
533 self.ui.warn(_("warning: ignoring unknown"
529 self.ui.warn(_("warning: ignoring unknown"
534 " working parent %s!\n") % short(node))
530 " working parent %s!\n") % short(node))
535 return nullid
531 return nullid
536
532
537 def __getitem__(self, changeid):
533 def __getitem__(self, changeid):
538 if changeid is None or changeid == wdirrev:
534 if changeid is None or changeid == wdirrev:
539 return context.workingctx(self)
535 return context.workingctx(self)
540 if isinstance(changeid, slice):
536 if isinstance(changeid, slice):
541 return [context.changectx(self, i)
537 return [context.changectx(self, i)
542 for i in xrange(*changeid.indices(len(self)))
538 for i in xrange(*changeid.indices(len(self)))
543 if i not in self.changelog.filteredrevs]
539 if i not in self.changelog.filteredrevs]
544 return context.changectx(self, changeid)
540 return context.changectx(self, changeid)
545
541
546 def __contains__(self, changeid):
542 def __contains__(self, changeid):
547 try:
543 try:
548 self[changeid]
544 self[changeid]
549 return True
545 return True
550 except error.RepoLookupError:
546 except error.RepoLookupError:
551 return False
547 return False
552
548
553 def __nonzero__(self):
549 def __nonzero__(self):
554 return True
550 return True
555
551
556 def __len__(self):
552 def __len__(self):
557 return len(self.changelog)
553 return len(self.changelog)
558
554
559 def __iter__(self):
555 def __iter__(self):
560 return iter(self.changelog)
556 return iter(self.changelog)
561
557
562 def revs(self, expr, *args):
558 def revs(self, expr, *args):
563 '''Find revisions matching a revset.
559 '''Find revisions matching a revset.
564
560
565 The revset is specified as a string ``expr`` that may contain
561 The revset is specified as a string ``expr`` that may contain
566 %-formatting to escape certain types. See ``revset.formatspec``.
562 %-formatting to escape certain types. See ``revset.formatspec``.
567
563
568 Revset aliases from the configuration are not expanded. To expand
564 Revset aliases from the configuration are not expanded. To expand
569 user aliases, consider calling ``scmutil.revrange()``.
565 user aliases, consider calling ``scmutil.revrange()``.
570
566
571 Returns a revset.abstractsmartset, which is a list-like interface
567 Returns a revset.abstractsmartset, which is a list-like interface
572 that contains integer revisions.
568 that contains integer revisions.
573 '''
569 '''
574 expr = revset.formatspec(expr, *args)
570 expr = revset.formatspec(expr, *args)
575 m = revset.match(None, expr)
571 m = revset.match(None, expr)
576 return m(self)
572 return m(self)
577
573
578 def set(self, expr, *args):
574 def set(self, expr, *args):
579 '''Find revisions matching a revset and emit changectx instances.
575 '''Find revisions matching a revset and emit changectx instances.
580
576
581 This is a convenience wrapper around ``revs()`` that iterates the
577 This is a convenience wrapper around ``revs()`` that iterates the
582 result and is a generator of changectx instances.
578 result and is a generator of changectx instances.
583
579
584 Revset aliases from the configuration are not expanded. To expand
580 Revset aliases from the configuration are not expanded. To expand
585 user aliases, consider calling ``scmutil.revrange()``.
581 user aliases, consider calling ``scmutil.revrange()``.
586 '''
582 '''
587 for r in self.revs(expr, *args):
583 for r in self.revs(expr, *args):
588 yield self[r]
584 yield self[r]
589
585
590 def url(self):
586 def url(self):
591 return 'file:' + self.root
587 return 'file:' + self.root
592
588
593 def hook(self, name, throw=False, **args):
589 def hook(self, name, throw=False, **args):
594 """Call a hook, passing this repo instance.
590 """Call a hook, passing this repo instance.
595
591
596 This a convenience method to aid invoking hooks. Extensions likely
592 This a convenience method to aid invoking hooks. Extensions likely
597 won't call this unless they have registered a custom hook or are
593 won't call this unless they have registered a custom hook or are
598 replacing code that is expected to call a hook.
594 replacing code that is expected to call a hook.
599 """
595 """
600 return hook.hook(self.ui, self, name, throw, **args)
596 return hook.hook(self.ui, self, name, throw, **args)
601
597
602 @unfilteredmethod
598 @unfilteredmethod
603 def _tag(self, names, node, message, local, user, date, extra=None,
599 def _tag(self, names, node, message, local, user, date, extra=None,
604 editor=False):
600 editor=False):
605 if isinstance(names, str):
601 if isinstance(names, str):
606 names = (names,)
602 names = (names,)
607
603
608 branches = self.branchmap()
604 branches = self.branchmap()
609 for name in names:
605 for name in names:
610 self.hook('pretag', throw=True, node=hex(node), tag=name,
606 self.hook('pretag', throw=True, node=hex(node), tag=name,
611 local=local)
607 local=local)
612 if name in branches:
608 if name in branches:
613 self.ui.warn(_("warning: tag %s conflicts with existing"
609 self.ui.warn(_("warning: tag %s conflicts with existing"
614 " branch name\n") % name)
610 " branch name\n") % name)
615
611
616 def writetags(fp, names, munge, prevtags):
612 def writetags(fp, names, munge, prevtags):
617 fp.seek(0, 2)
613 fp.seek(0, 2)
618 if prevtags and prevtags[-1] != '\n':
614 if prevtags and prevtags[-1] != '\n':
619 fp.write('\n')
615 fp.write('\n')
620 for name in names:
616 for name in names:
621 if munge:
617 if munge:
622 m = munge(name)
618 m = munge(name)
623 else:
619 else:
624 m = name
620 m = name
625
621
626 if (self._tagscache.tagtypes and
622 if (self._tagscache.tagtypes and
627 name in self._tagscache.tagtypes):
623 name in self._tagscache.tagtypes):
628 old = self.tags().get(name, nullid)
624 old = self.tags().get(name, nullid)
629 fp.write('%s %s\n' % (hex(old), m))
625 fp.write('%s %s\n' % (hex(old), m))
630 fp.write('%s %s\n' % (hex(node), m))
626 fp.write('%s %s\n' % (hex(node), m))
631 fp.close()
627 fp.close()
632
628
633 prevtags = ''
629 prevtags = ''
634 if local:
630 if local:
635 try:
631 try:
636 fp = self.vfs('localtags', 'r+')
632 fp = self.vfs('localtags', 'r+')
637 except IOError:
633 except IOError:
638 fp = self.vfs('localtags', 'a')
634 fp = self.vfs('localtags', 'a')
639 else:
635 else:
640 prevtags = fp.read()
636 prevtags = fp.read()
641
637
642 # local tags are stored in the current charset
638 # local tags are stored in the current charset
643 writetags(fp, names, None, prevtags)
639 writetags(fp, names, None, prevtags)
644 for name in names:
640 for name in names:
645 self.hook('tag', node=hex(node), tag=name, local=local)
641 self.hook('tag', node=hex(node), tag=name, local=local)
646 return
642 return
647
643
648 try:
644 try:
649 fp = self.wfile('.hgtags', 'rb+')
645 fp = self.wfile('.hgtags', 'rb+')
650 except IOError as e:
646 except IOError as e:
651 if e.errno != errno.ENOENT:
647 if e.errno != errno.ENOENT:
652 raise
648 raise
653 fp = self.wfile('.hgtags', 'ab')
649 fp = self.wfile('.hgtags', 'ab')
654 else:
650 else:
655 prevtags = fp.read()
651 prevtags = fp.read()
656
652
657 # committed tags are stored in UTF-8
653 # committed tags are stored in UTF-8
658 writetags(fp, names, encoding.fromlocal, prevtags)
654 writetags(fp, names, encoding.fromlocal, prevtags)
659
655
660 fp.close()
656 fp.close()
661
657
662 self.invalidatecaches()
658 self.invalidatecaches()
663
659
664 if '.hgtags' not in self.dirstate:
660 if '.hgtags' not in self.dirstate:
665 self[None].add(['.hgtags'])
661 self[None].add(['.hgtags'])
666
662
667 m = matchmod.exact(self.root, '', ['.hgtags'])
663 m = matchmod.exact(self.root, '', ['.hgtags'])
668 tagnode = self.commit(message, user, date, extra=extra, match=m,
664 tagnode = self.commit(message, user, date, extra=extra, match=m,
669 editor=editor)
665 editor=editor)
670
666
671 for name in names:
667 for name in names:
672 self.hook('tag', node=hex(node), tag=name, local=local)
668 self.hook('tag', node=hex(node), tag=name, local=local)
673
669
674 return tagnode
670 return tagnode
675
671
676 def tag(self, names, node, message, local, user, date, editor=False):
672 def tag(self, names, node, message, local, user, date, editor=False):
677 '''tag a revision with one or more symbolic names.
673 '''tag a revision with one or more symbolic names.
678
674
679 names is a list of strings or, when adding a single tag, names may be a
675 names is a list of strings or, when adding a single tag, names may be a
680 string.
676 string.
681
677
682 if local is True, the tags are stored in a per-repository file.
678 if local is True, the tags are stored in a per-repository file.
683 otherwise, they are stored in the .hgtags file, and a new
679 otherwise, they are stored in the .hgtags file, and a new
684 changeset is committed with the change.
680 changeset is committed with the change.
685
681
686 keyword arguments:
682 keyword arguments:
687
683
688 local: whether to store tags in non-version-controlled file
684 local: whether to store tags in non-version-controlled file
689 (default False)
685 (default False)
690
686
691 message: commit message to use if committing
687 message: commit message to use if committing
692
688
693 user: name of user to use if committing
689 user: name of user to use if committing
694
690
695 date: date tuple to use if committing'''
691 date: date tuple to use if committing'''
696
692
697 if not local:
693 if not local:
698 m = matchmod.exact(self.root, '', ['.hgtags'])
694 m = matchmod.exact(self.root, '', ['.hgtags'])
699 if any(self.status(match=m, unknown=True, ignored=True)):
695 if any(self.status(match=m, unknown=True, ignored=True)):
700 raise error.Abort(_('working copy of .hgtags is changed'),
696 raise error.Abort(_('working copy of .hgtags is changed'),
701 hint=_('please commit .hgtags manually'))
697 hint=_('please commit .hgtags manually'))
702
698
703 self.tags() # instantiate the cache
699 self.tags() # instantiate the cache
704 self._tag(names, node, message, local, user, date, editor=editor)
700 self._tag(names, node, message, local, user, date, editor=editor)
705
701
706 @filteredpropertycache
702 @filteredpropertycache
707 def _tagscache(self):
703 def _tagscache(self):
708 '''Returns a tagscache object that contains various tags related
704 '''Returns a tagscache object that contains various tags related
709 caches.'''
705 caches.'''
710
706
711 # This simplifies its cache management by having one decorated
707 # This simplifies its cache management by having one decorated
712 # function (this one) and the rest simply fetch things from it.
708 # function (this one) and the rest simply fetch things from it.
713 class tagscache(object):
709 class tagscache(object):
714 def __init__(self):
710 def __init__(self):
715 # These two define the set of tags for this repository. tags
711 # These two define the set of tags for this repository. tags
716 # maps tag name to node; tagtypes maps tag name to 'global' or
712 # maps tag name to node; tagtypes maps tag name to 'global' or
717 # 'local'. (Global tags are defined by .hgtags across all
713 # 'local'. (Global tags are defined by .hgtags across all
718 # heads, and local tags are defined in .hg/localtags.)
714 # heads, and local tags are defined in .hg/localtags.)
719 # They constitute the in-memory cache of tags.
715 # They constitute the in-memory cache of tags.
720 self.tags = self.tagtypes = None
716 self.tags = self.tagtypes = None
721
717
722 self.nodetagscache = self.tagslist = None
718 self.nodetagscache = self.tagslist = None
723
719
724 cache = tagscache()
720 cache = tagscache()
725 cache.tags, cache.tagtypes = self._findtags()
721 cache.tags, cache.tagtypes = self._findtags()
726
722
727 return cache
723 return cache
728
724
729 def tags(self):
725 def tags(self):
730 '''return a mapping of tag to node'''
726 '''return a mapping of tag to node'''
731 t = {}
727 t = {}
732 if self.changelog.filteredrevs:
728 if self.changelog.filteredrevs:
733 tags, tt = self._findtags()
729 tags, tt = self._findtags()
734 else:
730 else:
735 tags = self._tagscache.tags
731 tags = self._tagscache.tags
736 for k, v in tags.iteritems():
732 for k, v in tags.iteritems():
737 try:
733 try:
738 # ignore tags to unknown nodes
734 # ignore tags to unknown nodes
739 self.changelog.rev(v)
735 self.changelog.rev(v)
740 t[k] = v
736 t[k] = v
741 except (error.LookupError, ValueError):
737 except (error.LookupError, ValueError):
742 pass
738 pass
743 return t
739 return t
744
740
745 def _findtags(self):
741 def _findtags(self):
746 '''Do the hard work of finding tags. Return a pair of dicts
742 '''Do the hard work of finding tags. Return a pair of dicts
747 (tags, tagtypes) where tags maps tag name to node, and tagtypes
743 (tags, tagtypes) where tags maps tag name to node, and tagtypes
748 maps tag name to a string like \'global\' or \'local\'.
744 maps tag name to a string like \'global\' or \'local\'.
749 Subclasses or extensions are free to add their own tags, but
745 Subclasses or extensions are free to add their own tags, but
750 should be aware that the returned dicts will be retained for the
746 should be aware that the returned dicts will be retained for the
751 duration of the localrepo object.'''
747 duration of the localrepo object.'''
752
748
753 # XXX what tagtype should subclasses/extensions use? Currently
749 # XXX what tagtype should subclasses/extensions use? Currently
754 # mq and bookmarks add tags, but do not set the tagtype at all.
750 # mq and bookmarks add tags, but do not set the tagtype at all.
755 # Should each extension invent its own tag type? Should there
751 # Should each extension invent its own tag type? Should there
756 # be one tagtype for all such "virtual" tags? Or is the status
752 # be one tagtype for all such "virtual" tags? Or is the status
757 # quo fine?
753 # quo fine?
758
754
759 alltags = {} # map tag name to (node, hist)
755 alltags = {} # map tag name to (node, hist)
760 tagtypes = {}
756 tagtypes = {}
761
757
762 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
758 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
763 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
759 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
764
760
765 # Build the return dicts. Have to re-encode tag names because
761 # Build the return dicts. Have to re-encode tag names because
766 # the tags module always uses UTF-8 (in order not to lose info
762 # the tags module always uses UTF-8 (in order not to lose info
767 # writing to the cache), but the rest of Mercurial wants them in
763 # writing to the cache), but the rest of Mercurial wants them in
768 # local encoding.
764 # local encoding.
769 tags = {}
765 tags = {}
770 for (name, (node, hist)) in alltags.iteritems():
766 for (name, (node, hist)) in alltags.iteritems():
771 if node != nullid:
767 if node != nullid:
772 tags[encoding.tolocal(name)] = node
768 tags[encoding.tolocal(name)] = node
773 tags['tip'] = self.changelog.tip()
769 tags['tip'] = self.changelog.tip()
774 tagtypes = dict([(encoding.tolocal(name), value)
770 tagtypes = dict([(encoding.tolocal(name), value)
775 for (name, value) in tagtypes.iteritems()])
771 for (name, value) in tagtypes.iteritems()])
776 return (tags, tagtypes)
772 return (tags, tagtypes)
777
773
778 def tagtype(self, tagname):
774 def tagtype(self, tagname):
779 '''
775 '''
780 return the type of the given tag. result can be:
776 return the type of the given tag. result can be:
781
777
782 'local' : a local tag
778 'local' : a local tag
783 'global' : a global tag
779 'global' : a global tag
784 None : tag does not exist
780 None : tag does not exist
785 '''
781 '''
786
782
787 return self._tagscache.tagtypes.get(tagname)
783 return self._tagscache.tagtypes.get(tagname)
788
784
789 def tagslist(self):
785 def tagslist(self):
790 '''return a list of tags ordered by revision'''
786 '''return a list of tags ordered by revision'''
791 if not self._tagscache.tagslist:
787 if not self._tagscache.tagslist:
792 l = []
788 l = []
793 for t, n in self.tags().iteritems():
789 for t, n in self.tags().iteritems():
794 l.append((self.changelog.rev(n), t, n))
790 l.append((self.changelog.rev(n), t, n))
795 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
791 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
796
792
797 return self._tagscache.tagslist
793 return self._tagscache.tagslist
798
794
799 def nodetags(self, node):
795 def nodetags(self, node):
800 '''return the tags associated with a node'''
796 '''return the tags associated with a node'''
801 if not self._tagscache.nodetagscache:
797 if not self._tagscache.nodetagscache:
802 nodetagscache = {}
798 nodetagscache = {}
803 for t, n in self._tagscache.tags.iteritems():
799 for t, n in self._tagscache.tags.iteritems():
804 nodetagscache.setdefault(n, []).append(t)
800 nodetagscache.setdefault(n, []).append(t)
805 for tags in nodetagscache.itervalues():
801 for tags in nodetagscache.itervalues():
806 tags.sort()
802 tags.sort()
807 self._tagscache.nodetagscache = nodetagscache
803 self._tagscache.nodetagscache = nodetagscache
808 return self._tagscache.nodetagscache.get(node, [])
804 return self._tagscache.nodetagscache.get(node, [])
809
805
810 def nodebookmarks(self, node):
806 def nodebookmarks(self, node):
811 """return the list of bookmarks pointing to the specified node"""
807 """return the list of bookmarks pointing to the specified node"""
812 marks = []
808 marks = []
813 for bookmark, n in self._bookmarks.iteritems():
809 for bookmark, n in self._bookmarks.iteritems():
814 if n == node:
810 if n == node:
815 marks.append(bookmark)
811 marks.append(bookmark)
816 return sorted(marks)
812 return sorted(marks)
817
813
818 def branchmap(self):
814 def branchmap(self):
819 '''returns a dictionary {branch: [branchheads]} with branchheads
815 '''returns a dictionary {branch: [branchheads]} with branchheads
820 ordered by increasing revision number'''
816 ordered by increasing revision number'''
821 branchmap.updatecache(self)
817 branchmap.updatecache(self)
822 return self._branchcaches[self.filtername]
818 return self._branchcaches[self.filtername]
823
819
824 @unfilteredmethod
820 @unfilteredmethod
825 def revbranchcache(self):
821 def revbranchcache(self):
826 if not self._revbranchcache:
822 if not self._revbranchcache:
827 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
823 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
828 return self._revbranchcache
824 return self._revbranchcache
829
825
830 def branchtip(self, branch, ignoremissing=False):
826 def branchtip(self, branch, ignoremissing=False):
831 '''return the tip node for a given branch
827 '''return the tip node for a given branch
832
828
833 If ignoremissing is True, then this method will not raise an error.
829 If ignoremissing is True, then this method will not raise an error.
834 This is helpful for callers that only expect None for a missing branch
830 This is helpful for callers that only expect None for a missing branch
835 (e.g. namespace).
831 (e.g. namespace).
836
832
837 '''
833 '''
838 try:
834 try:
839 return self.branchmap().branchtip(branch)
835 return self.branchmap().branchtip(branch)
840 except KeyError:
836 except KeyError:
841 if not ignoremissing:
837 if not ignoremissing:
842 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
838 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
843 else:
839 else:
844 pass
840 pass
845
841
846 def lookup(self, key):
842 def lookup(self, key):
847 return self[key].node()
843 return self[key].node()
848
844
849 def lookupbranch(self, key, remote=None):
845 def lookupbranch(self, key, remote=None):
850 repo = remote or self
846 repo = remote or self
851 if key in repo.branchmap():
847 if key in repo.branchmap():
852 return key
848 return key
853
849
854 repo = (remote and remote.local()) and remote or self
850 repo = (remote and remote.local()) and remote or self
855 return repo[key].branch()
851 return repo[key].branch()
856
852
857 def known(self, nodes):
853 def known(self, nodes):
858 cl = self.changelog
854 cl = self.changelog
859 nm = cl.nodemap
855 nm = cl.nodemap
860 filtered = cl.filteredrevs
856 filtered = cl.filteredrevs
861 result = []
857 result = []
862 for n in nodes:
858 for n in nodes:
863 r = nm.get(n)
859 r = nm.get(n)
864 resp = not (r is None or r in filtered)
860 resp = not (r is None or r in filtered)
865 result.append(resp)
861 result.append(resp)
866 return result
862 return result
867
863
868 def local(self):
864 def local(self):
869 return self
865 return self
870
866
871 def publishing(self):
867 def publishing(self):
872 # it's safe (and desirable) to trust the publish flag unconditionally
868 # it's safe (and desirable) to trust the publish flag unconditionally
873 # so that we don't finalize changes shared between users via ssh or nfs
869 # so that we don't finalize changes shared between users via ssh or nfs
874 return self.ui.configbool('phases', 'publish', True, untrusted=True)
870 return self.ui.configbool('phases', 'publish', True, untrusted=True)
875
871
876 def cancopy(self):
872 def cancopy(self):
877 # so statichttprepo's override of local() works
873 # so statichttprepo's override of local() works
878 if not self.local():
874 if not self.local():
879 return False
875 return False
880 if not self.publishing():
876 if not self.publishing():
881 return True
877 return True
882 # if publishing we can't copy if there is filtered content
878 # if publishing we can't copy if there is filtered content
883 return not self.filtered('visible').changelog.filteredrevs
879 return not self.filtered('visible').changelog.filteredrevs
884
880
885 def shared(self):
881 def shared(self):
886 '''the type of shared repository (None if not shared)'''
882 '''the type of shared repository (None if not shared)'''
887 if self.sharedpath != self.path:
883 if self.sharedpath != self.path:
888 return 'store'
884 return 'store'
889 return None
885 return None
890
886
891 def join(self, f, *insidef):
887 def join(self, f, *insidef):
892 return self.vfs.join(os.path.join(f, *insidef))
888 return self.vfs.join(os.path.join(f, *insidef))
893
889
894 def wjoin(self, f, *insidef):
890 def wjoin(self, f, *insidef):
895 return self.vfs.reljoin(self.root, f, *insidef)
891 return self.vfs.reljoin(self.root, f, *insidef)
896
892
897 def file(self, f):
893 def file(self, f):
898 if f[0] == '/':
894 if f[0] == '/':
899 f = f[1:]
895 f = f[1:]
900 return filelog.filelog(self.svfs, f)
896 return filelog.filelog(self.svfs, f)
901
897
902 def changectx(self, changeid):
898 def changectx(self, changeid):
903 return self[changeid]
899 return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
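
    # For reference, these filter patterns come from hgrc sections of the
    # same name; a typical (illustrative) configuration:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #     [decode]
    #     *.gz = pipe: gzip
    #
    # wread() below applies the encode filters and wwrite() the decode ones.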

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when hooks run. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
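
    # Illustrative usage sketch (hypothetical caller): the store lock must be
    # held while a transaction is open, and the transaction must be closed on
    # success and released in all cases, as commitctx() below does:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...  # write store data through tr
    #             tr.close()
    #         finally:
    #             tr.release()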

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
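
    # For reference: the wait timeout read above is configurable; an
    # illustrative hgrc snippet matching the default used here:
    #
    #     [ui]
    #     timeout = 600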

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
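
    # Illustrative sketch (hypothetical hook name): deferring work until the
    # outermost lock is released, as the commit code below does for the
    # 'commit' hook:
    #
    #     def callback():
    #         repo.hook('myhook', throw=False)
    #     repo._afterlock(callback)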

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
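
    # Illustrative sketch of the documented acquisition order: take 'wlock'
    # before 'lock' to avoid the dead-lock hazard described above:
    #
    #     with repo.wlock():
    #         with repo.lock():
    #             ...  # modify .hg and the store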

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
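
    # For reference, the rename metadata recorded above has this shape
    # (illustrative, with a hypothetical source file 'foo'):
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}  # 40 hex digits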

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            cmdutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook
            # is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
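
    # Illustrative usage sketch (hypothetical message and user): commit()
    # returns the new changeset node, or None when there is nothing to
    # commit:
    #
    #     node = repo.commit(text='fix parser', user='alice <a@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')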

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changeset; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1776
1772
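commitctx is normally reached through localrepository.commit(), which builds a context from the dirstate, but it can also be fed an in-memory context directly. A minimal sketch, assuming a repo object is already open; the file name, contents, and commit message are hypothetical:

    from mercurial import context

    def filectxfn(repo, memctx, path):
        # returning None here would mark `path` as removed
        return context.memfilectx(repo, path, 'new contents\n')

    mctx = context.memctx(repo, (repo['.'].node(), None),
                          'example commit message',
                          ['hello.txt'], filectxfn, user='an author')
    node = repo.commitctx(mctx)
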
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or to
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

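destroying() and destroyed() bracket every history-destroying operation; repair.strip() is the canonical caller. A hedged sketch of the caller-side protocol, with the actual truncation work elided:

    l = repo.lock()
    try:
        repo.destroying()     # flush dirty in-memory state (e.g. phasecache)
        # ... truncate revlogs / rewrite history here ...
        repo.destroyed()      # drop and rebuild caches that named dead nodes
    finally:
        l.release()
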
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

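For example, walking the working directory (node=None) for Python files; matchmod.match is the usual way to build the match function, and the pattern here is illustrative:

    from mercurial import match as matchmod

    m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    for f in repo.walk(m):
        repo.ui.write(f + '\n')
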
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

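The return value is an scmutil.status tuple whose fields can be unpacked positionally or read by name. A brief sketch, again assuming an open repo object:

    st = repo.status()          # working directory against '.'
    for f in st.modified:
        repo.ui.write('M %s\n' % f)
    for f in st.added:
        repo.ui.write('A %s\n' % f)
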
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

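A short usage sketch, printing all heads (including closed ones) of a branch; the branch name is illustrative:

    for node in repo.branchheads('default', closed=True):
        repo.ui.write('%s\n' % repo[node].hex())
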
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

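For each (top, bottom) pair the loop walks the first-parent chain and records the nodes at exponentially growing distances 1, 2, 4, 8, ... from top, which is what lets the legacy discovery protocol narrow a range in logarithmically many round trips. The same sampling in isolation, as a plain-Python sketch over a toy parent mapping (the dict is made up):

    def sample(parents, top, bottom):
        # collect the nodes at distances 1, 2, 4, 8, ... from top
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = parents[n]
            if i == f:
                l.append(n)
                f *= 2
            n = p
            i += 1
        return l

    parents = {5: 4, 4: 3, 3: 2, 2: 1, 1: 0}    # linear history 0..5
    assert sample(parents, 5, 0) == [4, 3, 1]   # distances 1, 2 and 4
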
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

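Because checkpush is deliberately a no-op, extensions typically hook it by wrapping rather than subclassing. A hedged sketch using extensions.wrapfunction; the policy itself is made up:

    from mercurial import error, extensions, localrepo

    def checkpush(orig, repo, pushop):
        if pushop.force:
            raise error.Abort('forced pushes are disabled here')
        return orig(repo, pushop)

    def uisetup(ui):
        extensions.wrapfunction(localrepo.localrepository, 'checkpush',
                                checkpush)
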
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object of functions to be called before
        pushing changesets; each hook receives a pushop with repo, remote
        and outgoing attributes.
        """
        return util.hooks()

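Callers register functions on the returned util.hooks object with add(); each registered function receives the pushop. A sketch of how an extension might use it (the names and the threshold are illustrative):

    def checkoutgoing(pushop):
        # pushop carries .repo, .remote and .outgoing
        if len(pushop.outgoing.missing) > 100:
            pushop.repo.ui.warn('pushing more than 100 changesets\n')

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add('myext', checkoutgoing)
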
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

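pushkey and listkeys back the wire-protocol key/value namespaces; 'bookmarks' and 'phases' are the familiar ones. A hedged local-usage sketch, with a made-up bookmark name:

    marks = repo.listkeys('bookmarks')       # {name: hex node}
    old = marks.get('mybook', '')            # '' means the key is absent
    ok = repo.pushkey('bookmarks', 'mybook', old, repo['tip'].hex())
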
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

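Together these two helpers implement the journal-to-undo rename that backs `hg rollback`: aftertrans returns a closure that the transaction machinery runs once the transaction is safely closed, and undoname maps each journal file to its undo counterpart. For instance:

    # maps a transaction journal file to its post-transaction undo name
    assert undoname('.hg/store/journal') == os.path.join('.hg/store', 'undo')
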
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
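As the docstring says, extensions customize the requirement set by wrapping this function. A hedged sketch that adds a made-up requirement behind a made-up config knob:

    from mercurial import extensions, localrepo

    def wrapreqs(orig, repo):
        reqs = orig(repo)
        if repo.ui.configbool('myext', 'enabled', False):
            reqs.add('exp-myfeature')    # hypothetical requirement name
        return reqs

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements', wrapreqs)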