localrepository: remove None as default value of path argument in __init__()
Pulkit Goyal
r30571:91db2aa0 default
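The diff below spans the whole of mercurial/localrepo.py at this revision, but the only hunk is the `localrepository.__init__` signature, where `path` loses its `None` default. A minimal sketch of the caller-visible effect (not part of the changeset; it assumes a plain `ui` instance suffices and that the current directory holds a repository):

```python
# Sketch of what this change means for callers of localrepository():
# `path` is now a required positional argument, so a missing path fails
# immediately with a TypeError instead of surfacing later (e.g. inside
# scmutil.vfs when it tries to expand path=None).
from mercurial import ui as uimod, localrepo

baseui = uimod.ui()  # assumption: a default ui instance is acceptable here

repo = localrepo.localrepository(baseui, '.', create=False)

# localrepo.localrepository(baseui)  # TypeError: path is now required
```

In normal use callers go through `mercurial.hg.repository(ui, path)` rather than constructing `localrepository` directly, which is presumably why no caller relied on the `None` default.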
@@ -1,2005 +1,2005 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

-    def __init__(self, baseui, path=None, create=False):
+    def __init__(self, baseui, path, create=False):
253 self.requirements = set()
253 self.requirements = set()
254 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
254 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
255 self.wopener = self.wvfs
255 self.wopener = self.wvfs
256 self.root = self.wvfs.base
256 self.root = self.wvfs.base
257 self.path = self.wvfs.join(".hg")
257 self.path = self.wvfs.join(".hg")
258 self.origroot = path
258 self.origroot = path
259 self.auditor = pathutil.pathauditor(self.root, self._checknested)
259 self.auditor = pathutil.pathauditor(self.root, self._checknested)
260 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
260 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
261 realfs=False)
261 realfs=False)
262 self.vfs = scmutil.vfs(self.path)
262 self.vfs = scmutil.vfs(self.path)
263 self.opener = self.vfs
263 self.opener = self.vfs
264 self.baseui = baseui
264 self.baseui = baseui
265 self.ui = baseui.copy()
265 self.ui = baseui.copy()
266 self.ui.copy = baseui.copy # prevent copying repo configuration
266 self.ui.copy = baseui.copy # prevent copying repo configuration
267 # A list of callback to shape the phase if no data were found.
267 # A list of callback to shape the phase if no data were found.
268 # Callback are in the form: func(repo, roots) --> processed root.
268 # Callback are in the form: func(repo, roots) --> processed root.
269 # This list it to be filled by extension during repo setup
269 # This list it to be filled by extension during repo setup
270 self._phasedefaults = []
270 self._phasedefaults = []
271 try:
271 try:
272 self.ui.readconfig(self.join("hgrc"), self.root)
272 self.ui.readconfig(self.join("hgrc"), self.root)
273 extensions.loadall(self.ui)
273 extensions.loadall(self.ui)
274 except IOError:
274 except IOError:
275 pass
275 pass
276
276
277 if self.featuresetupfuncs:
277 if self.featuresetupfuncs:
278 self.supported = set(self._basesupported) # use private copy
278 self.supported = set(self._basesupported) # use private copy
279 extmods = set(m.__name__ for n, m
279 extmods = set(m.__name__ for n, m
280 in extensions.extensions(self.ui))
280 in extensions.extensions(self.ui))
281 for setupfunc in self.featuresetupfuncs:
281 for setupfunc in self.featuresetupfuncs:
282 if setupfunc.__module__ in extmods:
282 if setupfunc.__module__ in extmods:
283 setupfunc(self.ui, self.supported)
283 setupfunc(self.ui, self.supported)
284 else:
284 else:
285 self.supported = self._basesupported
285 self.supported = self._basesupported
286
286
287 if not self.vfs.isdir():
287 if not self.vfs.isdir():
288 if create:
288 if create:
289 self.requirements = newreporequirements(self)
289 self.requirements = newreporequirements(self)
290
290
291 if not self.wvfs.exists():
291 if not self.wvfs.exists():
292 self.wvfs.makedirs()
292 self.wvfs.makedirs()
293 self.vfs.makedir(notindexed=True)
293 self.vfs.makedir(notindexed=True)
294
294
295 if 'store' in self.requirements:
295 if 'store' in self.requirements:
296 self.vfs.mkdir("store")
296 self.vfs.mkdir("store")
297
297
298 # create an invalid changelog
298 # create an invalid changelog
299 self.vfs.append(
299 self.vfs.append(
300 "00changelog.i",
300 "00changelog.i",
301 '\0\0\0\2' # represents revlogv2
301 '\0\0\0\2' # represents revlogv2
302 ' dummy changelog to prevent using the old repo layout'
302 ' dummy changelog to prevent using the old repo layout'
303 )
303 )
304 else:
304 else:
305 raise error.RepoError(_("repository %s not found") % path)
305 raise error.RepoError(_("repository %s not found") % path)
306 elif create:
306 elif create:
307 raise error.RepoError(_("repository %s already exists") % path)
307 raise error.RepoError(_("repository %s already exists") % path)
308 else:
308 else:
309 try:
309 try:
310 self.requirements = scmutil.readrequires(
310 self.requirements = scmutil.readrequires(
311 self.vfs, self.supported)
311 self.vfs, self.supported)
312 except IOError as inst:
312 except IOError as inst:
313 if inst.errno != errno.ENOENT:
313 if inst.errno != errno.ENOENT:
314 raise
314 raise
315
315
316 self.sharedpath = self.path
316 self.sharedpath = self.path
317 try:
317 try:
318 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
318 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
319 realpath=True)
319 realpath=True)
320 s = vfs.base
320 s = vfs.base
321 if not vfs.exists():
321 if not vfs.exists():
322 raise error.RepoError(
322 raise error.RepoError(
323 _('.hg/sharedpath points to nonexistent directory %s') % s)
323 _('.hg/sharedpath points to nonexistent directory %s') % s)
324 self.sharedpath = s
324 self.sharedpath = s
325 except IOError as inst:
325 except IOError as inst:
326 if inst.errno != errno.ENOENT:
326 if inst.errno != errno.ENOENT:
327 raise
327 raise
328
328
329 self.store = store.store(
329 self.store = store.store(
330 self.requirements, self.sharedpath, scmutil.vfs)
330 self.requirements, self.sharedpath, scmutil.vfs)
331 self.spath = self.store.path
331 self.spath = self.store.path
332 self.svfs = self.store.vfs
332 self.svfs = self.store.vfs
333 self.sjoin = self.store.join
333 self.sjoin = self.store.join
334 self.vfs.createmode = self.store.createmode
334 self.vfs.createmode = self.store.createmode
335 self._applyopenerreqs()
335 self._applyopenerreqs()
336 if create:
336 if create:
337 self._writerequirements()
337 self._writerequirements()
338
338
339 self._dirstatevalidatewarned = False
339 self._dirstatevalidatewarned = False
340
340
341 self._branchcaches = {}
341 self._branchcaches = {}
342 self._revbranchcache = None
342 self._revbranchcache = None
343 self.filterpats = {}
343 self.filterpats = {}
344 self._datafilters = {}
344 self._datafilters = {}
345 self._transref = self._lockref = self._wlockref = None
345 self._transref = self._lockref = self._wlockref = None
346
346
347 # A cache for various files under .hg/ that tracks file changes,
347 # A cache for various files under .hg/ that tracks file changes,
348 # (used by the filecache decorator)
348 # (used by the filecache decorator)
349 #
349 #
350 # Maps a property name to its util.filecacheentry
350 # Maps a property name to its util.filecacheentry
351 self._filecache = {}
351 self._filecache = {}
352
352
353 # hold sets of revision to be filtered
353 # hold sets of revision to be filtered
354 # should be cleared when something might have changed the filter value:
354 # should be cleared when something might have changed the filter value:
355 # - new changesets,
355 # - new changesets,
356 # - phase change,
356 # - phase change,
357 # - new obsolescence marker,
357 # - new obsolescence marker,
358 # - working directory parent change,
358 # - working directory parent change,
359 # - bookmark changes
359 # - bookmark changes
360 self.filteredrevcache = {}
360 self.filteredrevcache = {}
361
361
362 # generic mapping between names and nodes
362 # generic mapping between names and nodes
363 self.names = namespaces.namespaces()
363 self.names = namespaces.namespaces()
364
364
365 def close(self):
365 def close(self):
366 self._writecaches()
366 self._writecaches()
367
367
368 def _writecaches(self):
368 def _writecaches(self):
369 if self._revbranchcache:
369 if self._revbranchcache:
370 self._revbranchcache.write()
370 self._revbranchcache.write()
371
371
372 def _restrictcapabilities(self, caps):
372 def _restrictcapabilities(self, caps):
373 if self.ui.configbool('experimental', 'bundle2-advertise', True):
373 if self.ui.configbool('experimental', 'bundle2-advertise', True):
374 caps = set(caps)
374 caps = set(caps)
375 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
375 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
376 caps.add('bundle2=' + urlreq.quote(capsblob))
376 caps.add('bundle2=' + urlreq.quote(capsblob))
377 return caps
377 return caps
378
378
379 def _applyopenerreqs(self):
379 def _applyopenerreqs(self):
380 self.svfs.options = dict((r, 1) for r in self.requirements
380 self.svfs.options = dict((r, 1) for r in self.requirements
381 if r in self.openerreqs)
381 if r in self.openerreqs)
382 # experimental config: format.chunkcachesize
382 # experimental config: format.chunkcachesize
383 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
383 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
384 if chunkcachesize is not None:
384 if chunkcachesize is not None:
385 self.svfs.options['chunkcachesize'] = chunkcachesize
385 self.svfs.options['chunkcachesize'] = chunkcachesize
386 # experimental config: format.maxchainlen
386 # experimental config: format.maxchainlen
387 maxchainlen = self.ui.configint('format', 'maxchainlen')
387 maxchainlen = self.ui.configint('format', 'maxchainlen')
388 if maxchainlen is not None:
388 if maxchainlen is not None:
389 self.svfs.options['maxchainlen'] = maxchainlen
389 self.svfs.options['maxchainlen'] = maxchainlen
390 # experimental config: format.manifestcachesize
390 # experimental config: format.manifestcachesize
391 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
391 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
392 if manifestcachesize is not None:
392 if manifestcachesize is not None:
393 self.svfs.options['manifestcachesize'] = manifestcachesize
393 self.svfs.options['manifestcachesize'] = manifestcachesize
394 # experimental config: format.aggressivemergedeltas
394 # experimental config: format.aggressivemergedeltas
395 aggressivemergedeltas = self.ui.configbool('format',
395 aggressivemergedeltas = self.ui.configbool('format',
396 'aggressivemergedeltas', False)
396 'aggressivemergedeltas', False)
397 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
397 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
398 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
399
399
400 def _writerequirements(self):
400 def _writerequirements(self):
401 scmutil.writerequires(self.vfs, self.requirements)
401 scmutil.writerequires(self.vfs, self.requirements)
402
402
403 def _checknested(self, path):
403 def _checknested(self, path):
404 """Determine if path is a legal nested repository."""
404 """Determine if path is a legal nested repository."""
405 if not path.startswith(self.root):
405 if not path.startswith(self.root):
406 return False
406 return False
407 subpath = path[len(self.root) + 1:]
407 subpath = path[len(self.root) + 1:]
408 normsubpath = util.pconvert(subpath)
408 normsubpath = util.pconvert(subpath)
409
409
410 # XXX: Checking against the current working copy is wrong in
410 # XXX: Checking against the current working copy is wrong in
411 # the sense that it can reject things like
411 # the sense that it can reject things like
412 #
412 #
413 # $ hg cat -r 10 sub/x.txt
413 # $ hg cat -r 10 sub/x.txt
414 #
414 #
415 # if sub/ is no longer a subrepository in the working copy
415 # if sub/ is no longer a subrepository in the working copy
416 # parent revision.
416 # parent revision.
417 #
417 #
418 # However, it can of course also allow things that would have
418 # However, it can of course also allow things that would have
419 # been rejected before, such as the above cat command if sub/
419 # been rejected before, such as the above cat command if sub/
420 # is a subrepository now, but was a normal directory before.
420 # is a subrepository now, but was a normal directory before.
421 # The old path auditor would have rejected by mistake since it
421 # The old path auditor would have rejected by mistake since it
422 # panics when it sees sub/.hg/.
422 # panics when it sees sub/.hg/.
423 #
423 #
424 # All in all, checking against the working copy seems sensible
424 # All in all, checking against the working copy seems sensible
425 # since we want to prevent access to nested repositories on
425 # since we want to prevent access to nested repositories on
426 # the filesystem *now*.
426 # the filesystem *now*.
427 ctx = self[None]
427 ctx = self[None]
428 parts = util.splitpath(subpath)
428 parts = util.splitpath(subpath)
429 while parts:
429 while parts:
430 prefix = '/'.join(parts)
430 prefix = '/'.join(parts)
431 if prefix in ctx.substate:
431 if prefix in ctx.substate:
432 if prefix == normsubpath:
432 if prefix == normsubpath:
433 return True
433 return True
434 else:
434 else:
435 sub = ctx.sub(prefix)
435 sub = ctx.sub(prefix)
436 return sub.checknested(subpath[len(prefix) + 1:])
436 return sub.checknested(subpath[len(prefix) + 1:])
437 else:
437 else:
438 parts.pop()
438 parts.pop()
439 return False
439 return False
440
440
441 def peer(self):
441 def peer(self):
442 return localpeer(self) # not cached to avoid reference cycle
442 return localpeer(self) # not cached to avoid reference cycle
443
443
444 def unfiltered(self):
444 def unfiltered(self):
445 """Return unfiltered version of the repository
445 """Return unfiltered version of the repository
446
446
447 Intended to be overwritten by filtered repo."""
447 Intended to be overwritten by filtered repo."""
448 return self
448 return self
449
449
450 def filtered(self, name):
450 def filtered(self, name):
451 """Return a filtered version of a repository"""
451 """Return a filtered version of a repository"""
452 # build a new class with the mixin and the current class
452 # build a new class with the mixin and the current class
453 # (possibly subclass of the repo)
453 # (possibly subclass of the repo)
454 class proxycls(repoview.repoview, self.unfiltered().__class__):
454 class proxycls(repoview.repoview, self.unfiltered().__class__):
455 pass
455 pass
456 return proxycls(self, name)
456 return proxycls(self, name)
457
457
458 @repofilecache('bookmarks', 'bookmarks.current')
458 @repofilecache('bookmarks', 'bookmarks.current')
459 def _bookmarks(self):
459 def _bookmarks(self):
460 return bookmarks.bmstore(self)
460 return bookmarks.bmstore(self)
461
461
462 @property
462 @property
463 def _activebookmark(self):
463 def _activebookmark(self):
464 return self._bookmarks.active
464 return self._bookmarks.active
465
465
466 def bookmarkheads(self, bookmark):
466 def bookmarkheads(self, bookmark):
467 name = bookmark.split('@', 1)[0]
467 name = bookmark.split('@', 1)[0]
468 heads = []
468 heads = []
469 for mark, n in self._bookmarks.iteritems():
469 for mark, n in self._bookmarks.iteritems():
470 if mark.split('@', 1)[0] == name:
470 if mark.split('@', 1)[0] == name:
471 heads.append(n)
471 heads.append(n)
472 return heads
472 return heads
473
473
474 # _phaserevs and _phasesets depend on changelog. what we need is to
474 # _phaserevs and _phasesets depend on changelog. what we need is to
475 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
475 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
476 # can't be easily expressed in filecache mechanism.
476 # can't be easily expressed in filecache mechanism.
477 @storecache('phaseroots', '00changelog.i')
477 @storecache('phaseroots', '00changelog.i')
478 def _phasecache(self):
478 def _phasecache(self):
479 return phases.phasecache(self, self._phasedefaults)
479 return phases.phasecache(self, self._phasedefaults)
480
480
481 @storecache('obsstore')
481 @storecache('obsstore')
482 def obsstore(self):
482 def obsstore(self):
483 # read default format for new obsstore.
483 # read default format for new obsstore.
484 # developer config: format.obsstore-version
484 # developer config: format.obsstore-version
485 defaultformat = self.ui.configint('format', 'obsstore-version', None)
485 defaultformat = self.ui.configint('format', 'obsstore-version', None)
486 # rely on obsstore class default when possible.
486 # rely on obsstore class default when possible.
487 kwargs = {}
487 kwargs = {}
488 if defaultformat is not None:
488 if defaultformat is not None:
489 kwargs['defaultformat'] = defaultformat
489 kwargs['defaultformat'] = defaultformat
490 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
490 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
491 store = obsolete.obsstore(self.svfs, readonly=readonly,
491 store = obsolete.obsstore(self.svfs, readonly=readonly,
492 **kwargs)
492 **kwargs)
493 if store and readonly:
493 if store and readonly:
494 self.ui.warn(
494 self.ui.warn(
495 _('obsolete feature not enabled but %i markers found!\n')
495 _('obsolete feature not enabled but %i markers found!\n')
496 % len(list(store)))
496 % len(list(store)))
497 return store
497 return store
498
498
499 @storecache('00changelog.i')
499 @storecache('00changelog.i')
500 def changelog(self):
500 def changelog(self):
501 c = changelog.changelog(self.svfs)
501 c = changelog.changelog(self.svfs)
502 if 'HG_PENDING' in os.environ:
502 if 'HG_PENDING' in os.environ:
503 p = os.environ['HG_PENDING']
503 p = os.environ['HG_PENDING']
504 if p.startswith(self.root):
504 if p.startswith(self.root):
505 c.readpending('00changelog.i.a')
505 c.readpending('00changelog.i.a')
506 return c
506 return c
507
507
508 def _constructmanifest(self):
508 def _constructmanifest(self):
509 # This is a temporary function while we migrate from manifest to
509 # This is a temporary function while we migrate from manifest to
510 # manifestlog. It allows bundlerepo and unionrepo to intercept the
510 # manifestlog. It allows bundlerepo and unionrepo to intercept the
511 # manifest creation.
511 # manifest creation.
512 return manifest.manifestrevlog(self.svfs)
512 return manifest.manifestrevlog(self.svfs)
513
513
514 @storecache('00manifest.i')
514 @storecache('00manifest.i')
515 def manifestlog(self):
515 def manifestlog(self):
516 return manifest.manifestlog(self.svfs, self)
516 return manifest.manifestlog(self.svfs, self)
517
517
518 @repofilecache('dirstate')
518 @repofilecache('dirstate')
519 def dirstate(self):
519 def dirstate(self):
520 return dirstate.dirstate(self.vfs, self.ui, self.root,
520 return dirstate.dirstate(self.vfs, self.ui, self.root,
521 self._dirstatevalidate)
521 self._dirstatevalidate)
522
522
523 def _dirstatevalidate(self, node):
523 def _dirstatevalidate(self, node):
524 try:
524 try:
525 self.changelog.rev(node)
525 self.changelog.rev(node)
526 return node
526 return node
527 except error.LookupError:
527 except error.LookupError:
528 if not self._dirstatevalidatewarned:
528 if not self._dirstatevalidatewarned:
529 self._dirstatevalidatewarned = True
529 self._dirstatevalidatewarned = True
530 self.ui.warn(_("warning: ignoring unknown"
530 self.ui.warn(_("warning: ignoring unknown"
531 " working parent %s!\n") % short(node))
531 " working parent %s!\n") % short(node))
532 return nullid
532 return nullid
533
533
534 def __getitem__(self, changeid):
534 def __getitem__(self, changeid):
535 if changeid is None or changeid == wdirrev:
535 if changeid is None or changeid == wdirrev:
536 return context.workingctx(self)
536 return context.workingctx(self)
537 if isinstance(changeid, slice):
537 if isinstance(changeid, slice):
538 return [context.changectx(self, i)
538 return [context.changectx(self, i)
539 for i in xrange(*changeid.indices(len(self)))
539 for i in xrange(*changeid.indices(len(self)))
540 if i not in self.changelog.filteredrevs]
540 if i not in self.changelog.filteredrevs]
541 return context.changectx(self, changeid)
541 return context.changectx(self, changeid)
542
542
543 def __contains__(self, changeid):
543 def __contains__(self, changeid):
544 try:
544 try:
545 self[changeid]
545 self[changeid]
546 return True
546 return True
547 except error.RepoLookupError:
547 except error.RepoLookupError:
548 return False
548 return False
549
549
550 def __nonzero__(self):
550 def __nonzero__(self):
551 return True
551 return True
552
552
553 def __len__(self):
553 def __len__(self):
554 return len(self.changelog)
554 return len(self.changelog)
555
555
556 def __iter__(self):
556 def __iter__(self):
557 return iter(self.changelog)
557 return iter(self.changelog)
558
558
559 def revs(self, expr, *args):
559 def revs(self, expr, *args):
560 '''Find revisions matching a revset.
560 '''Find revisions matching a revset.
561
561
562 The revset is specified as a string ``expr`` that may contain
562 The revset is specified as a string ``expr`` that may contain
563 %-formatting to escape certain types. See ``revset.formatspec``.
563 %-formatting to escape certain types. See ``revset.formatspec``.
564
564
565 Revset aliases from the configuration are not expanded. To expand
565 Revset aliases from the configuration are not expanded. To expand
566 user aliases, consider calling ``scmutil.revrange()``.
566 user aliases, consider calling ``scmutil.revrange()``.
567
567
568 Returns a revset.abstractsmartset, which is a list-like interface
568 Returns a revset.abstractsmartset, which is a list-like interface
569 that contains integer revisions.
569 that contains integer revisions.
570 '''
570 '''
571 expr = revset.formatspec(expr, *args)
571 expr = revset.formatspec(expr, *args)
572 m = revset.match(None, expr)
572 m = revset.match(None, expr)
573 return m(self)
573 return m(self)
574
574
575 def set(self, expr, *args):
575 def set(self, expr, *args):
576 '''Find revisions matching a revset and emit changectx instances.
576 '''Find revisions matching a revset and emit changectx instances.
577
577
578 This is a convenience wrapper around ``revs()`` that iterates the
578 This is a convenience wrapper around ``revs()`` that iterates the
579 result and is a generator of changectx instances.
579 result and is a generator of changectx instances.
580
580
581 Revset aliases from the configuration are not expanded. To expand
581 Revset aliases from the configuration are not expanded. To expand
582 user aliases, consider calling ``scmutil.revrange()``.
582 user aliases, consider calling ``scmutil.revrange()``.
583 '''
583 '''
584 for r in self.revs(expr, *args):
584 for r in self.revs(expr, *args):
585 yield self[r]
585 yield self[r]
586
586
587 def url(self):
587 def url(self):
588 return 'file:' + self.root
588 return 'file:' + self.root
589
589
590 def hook(self, name, throw=False, **args):
590 def hook(self, name, throw=False, **args):
591 """Call a hook, passing this repo instance.
591 """Call a hook, passing this repo instance.
592
592
593 This a convenience method to aid invoking hooks. Extensions likely
593 This a convenience method to aid invoking hooks. Extensions likely
594 won't call this unless they have registered a custom hook or are
594 won't call this unless they have registered a custom hook or are
595 replacing code that is expected to call a hook.
595 replacing code that is expected to call a hook.
596 """
596 """
597 return hook.hook(self.ui, self, name, throw, **args)
597 return hook.hook(self.ui, self, name, throw, **args)
598
598
599 @unfilteredmethod
599 @unfilteredmethod
600 def _tag(self, names, node, message, local, user, date, extra=None,
600 def _tag(self, names, node, message, local, user, date, extra=None,
601 editor=False):
601 editor=False):
602 if isinstance(names, str):
602 if isinstance(names, str):
603 names = (names,)
603 names = (names,)
604
604
605 branches = self.branchmap()
605 branches = self.branchmap()
606 for name in names:
606 for name in names:
607 self.hook('pretag', throw=True, node=hex(node), tag=name,
607 self.hook('pretag', throw=True, node=hex(node), tag=name,
608 local=local)
608 local=local)
609 if name in branches:
609 if name in branches:
610 self.ui.warn(_("warning: tag %s conflicts with existing"
610 self.ui.warn(_("warning: tag %s conflicts with existing"
611 " branch name\n") % name)
611 " branch name\n") % name)
612
612
613 def writetags(fp, names, munge, prevtags):
613 def writetags(fp, names, munge, prevtags):
614 fp.seek(0, 2)
614 fp.seek(0, 2)
615 if prevtags and prevtags[-1] != '\n':
615 if prevtags and prevtags[-1] != '\n':
616 fp.write('\n')
616 fp.write('\n')
617 for name in names:
617 for name in names:
618 if munge:
618 if munge:
619 m = munge(name)
619 m = munge(name)
620 else:
620 else:
621 m = name
621 m = name
622
622
623 if (self._tagscache.tagtypes and
623 if (self._tagscache.tagtypes and
624 name in self._tagscache.tagtypes):
624 name in self._tagscache.tagtypes):
625 old = self.tags().get(name, nullid)
625 old = self.tags().get(name, nullid)
626 fp.write('%s %s\n' % (hex(old), m))
626 fp.write('%s %s\n' % (hex(old), m))
627 fp.write('%s %s\n' % (hex(node), m))
627 fp.write('%s %s\n' % (hex(node), m))
628 fp.close()
628 fp.close()
629
629
630 prevtags = ''
630 prevtags = ''
631 if local:
631 if local:
632 try:
632 try:
633 fp = self.vfs('localtags', 'r+')
633 fp = self.vfs('localtags', 'r+')
634 except IOError:
634 except IOError:
635 fp = self.vfs('localtags', 'a')
635 fp = self.vfs('localtags', 'a')
636 else:
636 else:
637 prevtags = fp.read()
637 prevtags = fp.read()
638
638
639 # local tags are stored in the current charset
639 # local tags are stored in the current charset
640 writetags(fp, names, None, prevtags)
640 writetags(fp, names, None, prevtags)
641 for name in names:
641 for name in names:
642 self.hook('tag', node=hex(node), tag=name, local=local)
642 self.hook('tag', node=hex(node), tag=name, local=local)
643 return
643 return
644
644
645 try:
645 try:
646 fp = self.wfile('.hgtags', 'rb+')
646 fp = self.wfile('.hgtags', 'rb+')
647 except IOError as e:
647 except IOError as e:
648 if e.errno != errno.ENOENT:
648 if e.errno != errno.ENOENT:
649 raise
649 raise
650 fp = self.wfile('.hgtags', 'ab')
650 fp = self.wfile('.hgtags', 'ab')
651 else:
651 else:
652 prevtags = fp.read()
652 prevtags = fp.read()
653
653
654 # committed tags are stored in UTF-8
654 # committed tags are stored in UTF-8
655 writetags(fp, names, encoding.fromlocal, prevtags)
655 writetags(fp, names, encoding.fromlocal, prevtags)
656
656
657 fp.close()
657 fp.close()
658
658
659 self.invalidatecaches()
659 self.invalidatecaches()
660
660
661 if '.hgtags' not in self.dirstate:
661 if '.hgtags' not in self.dirstate:
662 self[None].add(['.hgtags'])
662 self[None].add(['.hgtags'])
663
663
664 m = matchmod.exact(self.root, '', ['.hgtags'])
664 m = matchmod.exact(self.root, '', ['.hgtags'])
665 tagnode = self.commit(message, user, date, extra=extra, match=m,
665 tagnode = self.commit(message, user, date, extra=extra, match=m,
666 editor=editor)
666 editor=editor)
667
667
668 for name in names:
668 for name in names:
669 self.hook('tag', node=hex(node), tag=name, local=local)
669 self.hook('tag', node=hex(node), tag=name, local=local)
670
670
671 return tagnode
671 return tagnode
672
672
673 def tag(self, names, node, message, local, user, date, editor=False):
673 def tag(self, names, node, message, local, user, date, editor=False):
674 '''tag a revision with one or more symbolic names.
674 '''tag a revision with one or more symbolic names.
675
675
676 names is a list of strings or, when adding a single tag, names may be a
676 names is a list of strings or, when adding a single tag, names may be a
677 string.
677 string.
678
678
679 if local is True, the tags are stored in a per-repository file.
679 if local is True, the tags are stored in a per-repository file.
680 otherwise, they are stored in the .hgtags file, and a new
680 otherwise, they are stored in the .hgtags file, and a new
681 changeset is committed with the change.
681 changeset is committed with the change.
682
682
683 keyword arguments:
683 keyword arguments:
684
684
685 local: whether to store tags in non-version-controlled file
685 local: whether to store tags in non-version-controlled file
686 (default False)
686 (default False)
687
687
688 message: commit message to use if committing
688 message: commit message to use if committing
689
689
690 user: name of user to use if committing
690 user: name of user to use if committing
691
691
692 date: date tuple to use if committing'''
692 date: date tuple to use if committing'''
693
693
694 if not local:
694 if not local:
695 m = matchmod.exact(self.root, '', ['.hgtags'])
695 m = matchmod.exact(self.root, '', ['.hgtags'])
696 if any(self.status(match=m, unknown=True, ignored=True)):
696 if any(self.status(match=m, unknown=True, ignored=True)):
697 raise error.Abort(_('working copy of .hgtags is changed'),
697 raise error.Abort(_('working copy of .hgtags is changed'),
698 hint=_('please commit .hgtags manually'))
698 hint=_('please commit .hgtags manually'))
699
699
700 self.tags() # instantiate the cache
700 self.tags() # instantiate the cache
701 self._tag(names, node, message, local, user, date, editor=editor)
701 self._tag(names, node, message, local, user, date, editor=editor)
702
702
703 @filteredpropertycache
703 @filteredpropertycache
704 def _tagscache(self):
704 def _tagscache(self):
705 '''Returns a tagscache object that contains various tags related
705 '''Returns a tagscache object that contains various tags related
706 caches.'''
706 caches.'''
707
707
708 # This simplifies its cache management by having one decorated
708 # This simplifies its cache management by having one decorated
709 # function (this one) and the rest simply fetch things from it.
709 # function (this one) and the rest simply fetch things from it.
710 class tagscache(object):
710 class tagscache(object):
711 def __init__(self):
711 def __init__(self):
712 # These two define the set of tags for this repository. tags
712 # These two define the set of tags for this repository. tags
713 # maps tag name to node; tagtypes maps tag name to 'global' or
713 # maps tag name to node; tagtypes maps tag name to 'global' or
714 # 'local'. (Global tags are defined by .hgtags across all
714 # 'local'. (Global tags are defined by .hgtags across all
715 # heads, and local tags are defined in .hg/localtags.)
715 # heads, and local tags are defined in .hg/localtags.)
716 # They constitute the in-memory cache of tags.
716 # They constitute the in-memory cache of tags.
717 self.tags = self.tagtypes = None
717 self.tags = self.tagtypes = None
718
718
719 self.nodetagscache = self.tagslist = None
719 self.nodetagscache = self.tagslist = None
720
720
721 cache = tagscache()
721 cache = tagscache()
722 cache.tags, cache.tagtypes = self._findtags()
722 cache.tags, cache.tagtypes = self._findtags()
723
723
724 return cache
724 return cache
725
725
726 def tags(self):
726 def tags(self):
727 '''return a mapping of tag to node'''
727 '''return a mapping of tag to node'''
728 t = {}
728 t = {}
729 if self.changelog.filteredrevs:
729 if self.changelog.filteredrevs:
730 tags, tt = self._findtags()
730 tags, tt = self._findtags()
731 else:
731 else:
732 tags = self._tagscache.tags
732 tags = self._tagscache.tags
733 for k, v in tags.iteritems():
733 for k, v in tags.iteritems():
734 try:
734 try:
735 # ignore tags to unknown nodes
735 # ignore tags to unknown nodes
736 self.changelog.rev(v)
736 self.changelog.rev(v)
737 t[k] = v
737 t[k] = v
738 except (error.LookupError, ValueError):
738 except (error.LookupError, ValueError):
739 pass
739 pass
740 return t
740 return t
741
741
742 def _findtags(self):
742 def _findtags(self):
743 '''Do the hard work of finding tags. Return a pair of dicts
743 '''Do the hard work of finding tags. Return a pair of dicts
744 (tags, tagtypes) where tags maps tag name to node, and tagtypes
744 (tags, tagtypes) where tags maps tag name to node, and tagtypes
745 maps tag name to a string like \'global\' or \'local\'.
745 maps tag name to a string like \'global\' or \'local\'.
746 Subclasses or extensions are free to add their own tags, but
746 Subclasses or extensions are free to add their own tags, but
747 should be aware that the returned dicts will be retained for the
747 should be aware that the returned dicts will be retained for the
748 duration of the localrepo object.'''
748 duration of the localrepo object.'''
749
749
750 # XXX what tagtype should subclasses/extensions use? Currently
750 # XXX what tagtype should subclasses/extensions use? Currently
751 # mq and bookmarks add tags, but do not set the tagtype at all.
751 # mq and bookmarks add tags, but do not set the tagtype at all.
752 # Should each extension invent its own tag type? Should there
752 # Should each extension invent its own tag type? Should there
753 # be one tagtype for all such "virtual" tags? Or is the status
753 # be one tagtype for all such "virtual" tags? Or is the status
754 # quo fine?
754 # quo fine?
755
755
756 alltags = {} # map tag name to (node, hist)
756 alltags = {} # map tag name to (node, hist)
757 tagtypes = {}
757 tagtypes = {}
758
758
759 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
759 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
760 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
760 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
761
761
762 # Build the return dicts. Have to re-encode tag names because
762 # Build the return dicts. Have to re-encode tag names because
763 # the tags module always uses UTF-8 (in order not to lose info
763 # the tags module always uses UTF-8 (in order not to lose info
764 # writing to the cache), but the rest of Mercurial wants them in
764 # writing to the cache), but the rest of Mercurial wants them in
765 # local encoding.
765 # local encoding.
766 tags = {}
766 tags = {}
767 for (name, (node, hist)) in alltags.iteritems():
767 for (name, (node, hist)) in alltags.iteritems():
768 if node != nullid:
768 if node != nullid:
769 tags[encoding.tolocal(name)] = node
769 tags[encoding.tolocal(name)] = node
770 tags['tip'] = self.changelog.tip()
770 tags['tip'] = self.changelog.tip()
771 tagtypes = dict([(encoding.tolocal(name), value)
771 tagtypes = dict([(encoding.tolocal(name), value)
772 for (name, value) in tagtypes.iteritems()])
772 for (name, value) in tagtypes.iteritems()])
773 return (tags, tagtypes)
773 return (tags, tagtypes)
774
774
775 def tagtype(self, tagname):
775 def tagtype(self, tagname):
776 '''
776 '''
777 return the type of the given tag. result can be:
777 return the type of the given tag. result can be:
778
778
779 'local' : a local tag
779 'local' : a local tag
780 'global' : a global tag
780 'global' : a global tag
781 None : tag does not exist
781 None : tag does not exist
782 '''
782 '''
783
783
784 return self._tagscache.tagtypes.get(tagname)
784 return self._tagscache.tagtypes.get(tagname)
785
785
786 def tagslist(self):
786 def tagslist(self):
787 '''return a list of tags ordered by revision'''
787 '''return a list of tags ordered by revision'''
788 if not self._tagscache.tagslist:
788 if not self._tagscache.tagslist:
789 l = []
789 l = []
790 for t, n in self.tags().iteritems():
790 for t, n in self.tags().iteritems():
791 l.append((self.changelog.rev(n), t, n))
791 l.append((self.changelog.rev(n), t, n))
792 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
792 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
793
793
794 return self._tagscache.tagslist
794 return self._tagscache.tagslist
795
795
796 def nodetags(self, node):
796 def nodetags(self, node):
797 '''return the tags associated with a node'''
797 '''return the tags associated with a node'''
798 if not self._tagscache.nodetagscache:
798 if not self._tagscache.nodetagscache:
799 nodetagscache = {}
799 nodetagscache = {}
800 for t, n in self._tagscache.tags.iteritems():
800 for t, n in self._tagscache.tags.iteritems():
801 nodetagscache.setdefault(n, []).append(t)
801 nodetagscache.setdefault(n, []).append(t)
802 for tags in nodetagscache.itervalues():
802 for tags in nodetagscache.itervalues():
803 tags.sort()
803 tags.sort()
804 self._tagscache.nodetagscache = nodetagscache
804 self._tagscache.nodetagscache = nodetagscache
805 return self._tagscache.nodetagscache.get(node, [])
805 return self._tagscache.nodetagscache.get(node, [])
806
806
807 def nodebookmarks(self, node):
807 def nodebookmarks(self, node):
808 """return the list of bookmarks pointing to the specified node"""
808 """return the list of bookmarks pointing to the specified node"""
809 marks = []
809 marks = []
810 for bookmark, n in self._bookmarks.iteritems():
810 for bookmark, n in self._bookmarks.iteritems():
811 if n == node:
811 if n == node:
812 marks.append(bookmark)
812 marks.append(bookmark)
813 return sorted(marks)
813 return sorted(marks)
814
814
815 def branchmap(self):
815 def branchmap(self):
816 '''returns a dictionary {branch: [branchheads]} with branchheads
816 '''returns a dictionary {branch: [branchheads]} with branchheads
817 ordered by increasing revision number'''
817 ordered by increasing revision number'''
818 branchmap.updatecache(self)
818 branchmap.updatecache(self)
819 return self._branchcaches[self.filtername]
819 return self._branchcaches[self.filtername]
820
820
821 @unfilteredmethod
821 @unfilteredmethod
822 def revbranchcache(self):
822 def revbranchcache(self):
823 if not self._revbranchcache:
823 if not self._revbranchcache:
824 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
824 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
825 return self._revbranchcache
825 return self._revbranchcache
826
826
827 def branchtip(self, branch, ignoremissing=False):
827 def branchtip(self, branch, ignoremissing=False):
828 '''return the tip node for a given branch
828 '''return the tip node for a given branch
829
829
830 If ignoremissing is True, then this method will not raise an error.
830 If ignoremissing is True, then this method will not raise an error.
831 This is helpful for callers that only expect None for a missing branch
831 This is helpful for callers that only expect None for a missing branch
832 (e.g. namespace).
832 (e.g. namespace).
833
833
834 '''
834 '''
835 try:
835 try:
836 return self.branchmap().branchtip(branch)
836 return self.branchmap().branchtip(branch)
837 except KeyError:
837 except KeyError:
838 if not ignoremissing:
838 if not ignoremissing:
839 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
839 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
840 else:
840 else:
841 pass
841 pass
842
842
843 def lookup(self, key):
843 def lookup(self, key):
844 return self[key].node()
844 return self[key].node()
845
845
846 def lookupbranch(self, key, remote=None):
846 def lookupbranch(self, key, remote=None):
847 repo = remote or self
847 repo = remote or self
848 if key in repo.branchmap():
848 if key in repo.branchmap():
849 return key
849 return key
850
850
851 repo = (remote and remote.local()) and remote or self
851 repo = (remote and remote.local()) and remote or self
852 return repo[key].branch()
852 return repo[key].branch()
853
853
854 def known(self, nodes):
854 def known(self, nodes):
855 cl = self.changelog
855 cl = self.changelog
856 nm = cl.nodemap
856 nm = cl.nodemap
857 filtered = cl.filteredrevs
857 filtered = cl.filteredrevs
858 result = []
858 result = []
859 for n in nodes:
859 for n in nodes:
860 r = nm.get(n)
860 r = nm.get(n)
861 resp = not (r is None or r in filtered)
861 resp = not (r is None or r in filtered)
862 result.append(resp)
862 result.append(resp)
863 return result
863 return result
864
864
865 def local(self):
865 def local(self):
866 return self
866 return self
867
867
868 def publishing(self):
868 def publishing(self):
869 # it's safe (and desirable) to trust the publish flag unconditionally
869 # it's safe (and desirable) to trust the publish flag unconditionally
870 # so that we don't finalize changes shared between users via ssh or nfs
870 # so that we don't finalize changes shared between users via ssh or nfs
871 return self.ui.configbool('phases', 'publish', True, untrusted=True)
871 return self.ui.configbool('phases', 'publish', True, untrusted=True)
872
872
873 def cancopy(self):
873 def cancopy(self):
874 # so statichttprepo's override of local() works
874 # so statichttprepo's override of local() works
875 if not self.local():
875 if not self.local():
876 return False
876 return False
877 if not self.publishing():
877 if not self.publishing():
878 return True
878 return True
879 # if publishing we can't copy if there is filtered content
879 # if publishing we can't copy if there is filtered content
880 return not self.filtered('visible').changelog.filteredrevs
880 return not self.filtered('visible').changelog.filteredrevs
881
881
882 def shared(self):
882 def shared(self):
883 '''the type of shared repository (None if not shared)'''
883 '''the type of shared repository (None if not shared)'''
884 if self.sharedpath != self.path:
884 if self.sharedpath != self.path:
885 return 'store'
885 return 'store'
886 return None
886 return None
887
887
888 def join(self, f, *insidef):
888 def join(self, f, *insidef):
889 return self.vfs.join(os.path.join(f, *insidef))
889 return self.vfs.join(os.path.join(f, *insidef))
890
890
891 def wjoin(self, f, *insidef):
891 def wjoin(self, f, *insidef):
892 return self.vfs.reljoin(self.root, f, *insidef)
892 return self.vfs.reljoin(self.root, f, *insidef)
893
893
894 def file(self, f):
894 def file(self, f):
895 if f[0] == '/':
895 if f[0] == '/':
896 f = f[1:]
896 f = f[1:]
897 return filelog.filelog(self.svfs, f)
897 return filelog.filelog(self.svfs, f)
898
898
899 def changectx(self, changeid):
899 def changectx(self, changeid):
900 return self[changeid]
900 return self[changeid]
901
901
    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

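    # The begin/end parent-change protocol wrapped by setparents(), as a
    # sketch (callers are expected to hold the wlock; newnode is
    # hypothetical):
    #
    #     with repo.wlock():
    #         repo.setparents(newnode)    # p2 defaults to nullid
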
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

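    # _loadfilter() compiles pattern/command pairs from hgrc sections such
    # as [encode] and [decode]. A hypothetical configuration (not shipped
    # with this module) illustrating the shapes it accepts:
    #
    #     [encode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #     **.bin = !
    #
    # '!' disables filtering for a pattern; a command starting with a name
    # registered through adddatafilter() dispatches to that Python filter,
    # and anything else is run as a shell filter via util.filter().
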
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

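    # How the ``flags`` argument to wwrite() is interpreted, as a sketch
    # (filenames and data are hypothetical):
    #
    #     repo.wwrite('plain.txt', data, '')     # regular file
    #     repo.wwrite('run.sh', data, 'x')       # sets the executable bit
    #     repo.wwrite('link', target, 'l')       # creates a symlink
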
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing the
                # transaction if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

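    # The canonical open/close/release pattern for transaction(), as a
    # sketch (the caller must already hold the store lock, which the devel
    # checks above verify):
    #
    #     with repo.lock():
    #         tr = repo.transaction('example')
    #         try:
    #             ...              # write store data through tr
    #             tr.close()       # commit the transaction
    #         finally:
    #             tr.release()     # rolls back if close() was never reached
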
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

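    # recover() and rollback() implement the 'hg recover' and 'hg rollback'
    # commands: the former replays the 'journal' file left behind by an
    # interrupted transaction, the latter restores the 'undo.*' files
    # written by _writejournal(). A sketch of driving them directly:
    #
    #     if repo.svfs.exists('journal'):
    #         repo.recover()
    #     else:
    #         repo.rollback(dryrun=True)    # report what would be undone
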
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

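    # A sketch of deferring work with _afterlock() (the callback here is
    # hypothetical); if no lock is currently held, it runs immediately:
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)
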
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisitions. Such
        # acquisitions would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

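    # The acquisition order required by the docstrings above, as a sketch;
    # taking the locks the other way round risks deadlock and triggers a
    # devel-warning when devel.check-locks is enabled:
    #
    #     with repo.wlock():        # working copy lock first
    #         with repo.lock():     # then the store lock
    #             ...               # safe to modify both parts
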
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                            '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                        '.hgsubstate' not in (status.modified + status.added +
                                              status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

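    # Minimal programmatic use of commit(), as a sketch (the message and
    # user are hypothetical; per the logic above, an empty commit returns
    # None unless ui.allowemptycommit is set):
    #
    #     node = repo.commit(text='example message',
    #                        user='someone@example.com')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
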
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

1752 # update changelog
1752 # update changelog
1753 self.ui.note(_("committing changelog\n"))
1753 self.ui.note(_("committing changelog\n"))
1754 self.changelog.delayupdate(tr)
1754 self.changelog.delayupdate(tr)
1755 n = self.changelog.add(mn, files, ctx.description(),
1755 n = self.changelog.add(mn, files, ctx.description(),
1756 trp, p1.node(), p2.node(),
1756 trp, p1.node(), p2.node(),
1757 user, ctx.date(), ctx.extra().copy())
1757 user, ctx.date(), ctx.extra().copy())
1758 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1758 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1759 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1759 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1760 parent2=xp2)
1760 parent2=xp2)
            # move the new commit into its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract
                # anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

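    # A hedged sketch of one way commitctx() gets exercised: building an
    # in-memory changeset with context.memctx and committing it. File name,
    # contents and message below are purely illustrative.
    #
    #   def filectxfn(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'example data\n')
    #
    #   mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                         'example commit message',
    #                         ['example.txt'], filectxfn, user='test user')
    #   node = repo.commitctx(mctx)
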
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or
        to vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

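    # Sketch of the expected call order around history destruction
    # (illustrative; strip and rollback follow this pattern):
    #
    #   lck = repo.lock()
    #   try:
    #       repo.destroying()   # flush pending in-memory state first
    #       ...remove the doomed nodes from the revlogs...
    #       repo.destroyed()    # repair caches and invalidate the repo
    #   finally:
    #       lck.release()
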
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

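    # Example sketch (assumes an existing repo object; the pattern is
    # illustrative): walking the tracked Python files of the working
    # directory.
    #
    #   m = matchmod.match(repo.root, repo.root, ['glob:**.py'])
    #   for f in repo.walk(m):    # node=None selects the working context
    #       repo.ui.write(f + '\n')
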
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

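    # Example sketch: comparing the working directory against its first
    # parent; the result is a scmutil.status tuple of sorted file lists.
    #
    #   st = repo.status(unknown=True)
    #   touched = st.modified + st.added + st.removed
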
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

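    # Example sketch (branch name is illustrative): printing the heads of
    # a named branch, newest first, including closed ones.
    #
    #   for h in repo.branchheads('default', closed=True):
    #       repo.ui.write('%s\n' % short(h))
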
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

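    # Sketch of the sampling this performs for the legacy discovery
    # protocol: walking first parents from 'top' toward 'bottom', only the
    # nodes at distances 1, 2, 4, 8, ... from 'top' are reported.
    #
    #   samples = repo.between([(repo.changelog.tip(), nullid)])[0]
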
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

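    # Example sketch: moving a bookmark through the pushkey protocol.
    # Bookmark name and node are illustrative; values are hex strings and
    # '' stands for "no previous value" (creation).
    #
    #   ok = repo.pushkey('bookmarks', 'feature', '', hex(newnode))
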
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

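    # Example sketch: enumerating pushkey namespaces and their contents.
    #
    #   repo.listkeys('namespaces')   # e.g. {'bookmarks': '', 'phases': ''}
    #   repo.listkeys('bookmarks')    # {bookmark name: hex node}
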
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

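    # Example sketch: preserving the message of a failed commit so the
    # user can recover it from .hg/last-message.txt.
    #
    #   msgfile = repo.savecommitmessage(ctx.description())
    #   repo.ui.status(_("commit message saved to %s\n") % msgfile)
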
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

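# Usage sketch (illustrative): the transaction machinery registers the
# returned callback so journal files become undo files once the
# transaction closes cleanly.
#
#   onclose = aftertrans([(repo.svfs, 'journal', 'undo')])
#   # ... transaction completes ...
#   onclose()   # renames each journal file, ignoring missing ones
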
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

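# For example (hypothetical path), undoname('store/journal.phaseroots')
# returns 'store/undo.phaseroots'.
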
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
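
# Sketch of how an extension might wrap newreporequirements() to add a
# custom requirement (extension and requirement names are illustrative):
#
#   def wrapper(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enabled'):
#           reqs.add('myext-requirement')
#       return reqs
#   extensions.wrapfunction(localrepo, 'newreporequirements', wrapper)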