repofilecache: define a 'join' method...
Pierre-Yves David
r31282:b9228a22 default
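
The change itself is two lines: `repofilecache` gains an explicit `join` method that resolves a tracked file's path through `repo.join` (paths under `.hg/`), mirroring the `join` override that `storecache` already uses to resolve paths through `repo.sjoin` (paths under `.hg/store/`). A minimal sketch of the pattern is below; the cut-down `filecache` base and the `toyrepo` class are illustrative stand-ins (assumptions for this sketch), not the real `scmutil.filecache`, which also stats the tracked files so stale cache entries can be invalidated.

import os

class filecache(object):
    """Cut-down stand-in for scmutil.filecache: a descriptor that caches
    the decorated function's result and locates the files backing the
    cache via join(), which subclasses override. (The real class also
    records stat data for those files to detect staleness.)"""
    def __init__(self, *paths):
        self.paths = paths

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def join(self, obj, fname):
        raise NotImplementedError

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.name not in vars(obj):
            # where the real implementation would stat each tracked file
            tracked = [self.join(obj, p) for p in self.paths]
            print('tracking: %s' % ', '.join(tracked))
            vars(obj)[self.name] = self.func(obj)
        return vars(obj)[self.name]

class repofilecache(filecache):
    """caches backed by files under .hg/ (resolved via repo.join)"""
    def join(self, obj, fname):
        return obj.join(fname)

class storecache(repofilecache):
    """caches backed by files under .hg/store/ (resolved via repo.sjoin)"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class toyrepo(object):
    """hypothetical repo exposing only the two path helpers the caches use"""
    def join(self, fname):
        return os.path.join('.hg', fname)

    def sjoin(self, fname):
        return os.path.join('.hg', 'store', fname)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return {'@': 'deadbeef'}

    @storecache('00changelog.i')
    def changelog(self):
        return ['rev0', 'rev1']

repo = toyrepo()
repo._bookmarks   # tracking: .hg/bookmarks, .hg/bookmarks.current
repo.changelog    # tracking: .hg/store/00changelog.i
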
@@ -1,2073 +1,2075 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 store,
56 store,
57 subrepo,
57 subrepo,
58 tags as tagsmod,
58 tags as tagsmod,
59 transaction,
59 transaction,
60 txnutil,
60 txnutil,
61 util,
61 util,
62 vfs as vfsmod,
62 vfs as vfsmod,
63 )
63 )
64
64
65 release = lockmod.release
65 release = lockmod.release
66 urlerr = util.urlerr
66 urlerr = util.urlerr
67 urlreq = util.urlreq
67 urlreq = util.urlreq
68
68
69 class repofilecache(scmutil.filecache):
69 class repofilecache(scmutil.filecache):
70 """All filecache usage on repo are done for logic that should be unfiltered
70 """All filecache usage on repo are done for logic that should be unfiltered
71 """
71 """
72
72
73 def join(self, obj, fname):
74 return obj.join(fname)
73 def __get__(self, repo, type=None):
75 def __get__(self, repo, type=None):
74 if repo is None:
76 if repo is None:
75 return self
77 return self
76 return super(repofilecache, self).__get__(repo.unfiltered(), type)
78 return super(repofilecache, self).__get__(repo.unfiltered(), type)
77 def __set__(self, repo, value):
79 def __set__(self, repo, value):
78 return super(repofilecache, self).__set__(repo.unfiltered(), value)
80 return super(repofilecache, self).__set__(repo.unfiltered(), value)
79 def __delete__(self, repo):
81 def __delete__(self, repo):
80 return super(repofilecache, self).__delete__(repo.unfiltered())
82 return super(repofilecache, self).__delete__(repo.unfiltered())
81
83
82 class storecache(repofilecache):
84 class storecache(repofilecache):
83 """filecache for files in the store"""
85 """filecache for files in the store"""
84 def join(self, obj, fname):
86 def join(self, obj, fname):
85 return obj.sjoin(fname)
87 return obj.sjoin(fname)
86
88
87 class unfilteredpropertycache(util.propertycache):
89 class unfilteredpropertycache(util.propertycache):
88 """propertycache that apply to unfiltered repo only"""
90 """propertycache that apply to unfiltered repo only"""
89
91
90 def __get__(self, repo, type=None):
92 def __get__(self, repo, type=None):
91 unfi = repo.unfiltered()
93 unfi = repo.unfiltered()
92 if unfi is repo:
94 if unfi is repo:
93 return super(unfilteredpropertycache, self).__get__(unfi)
95 return super(unfilteredpropertycache, self).__get__(unfi)
94 return getattr(unfi, self.name)
96 return getattr(unfi, self.name)
95
97
96 class filteredpropertycache(util.propertycache):
98 class filteredpropertycache(util.propertycache):
97 """propertycache that must take filtering in account"""
99 """propertycache that must take filtering in account"""
98
100
99 def cachevalue(self, obj, value):
101 def cachevalue(self, obj, value):
100 object.__setattr__(obj, self.name, value)
102 object.__setattr__(obj, self.name, value)
101
103
102
104
103 def hasunfilteredcache(repo, name):
105 def hasunfilteredcache(repo, name):
104 """check if a repo has an unfilteredpropertycache value for <name>"""
106 """check if a repo has an unfilteredpropertycache value for <name>"""
105 return name in vars(repo.unfiltered())
107 return name in vars(repo.unfiltered())
106
108
107 def unfilteredmethod(orig):
109 def unfilteredmethod(orig):
108 """decorate method that always need to be run on unfiltered version"""
110 """decorate method that always need to be run on unfiltered version"""
109 def wrapper(repo, *args, **kwargs):
111 def wrapper(repo, *args, **kwargs):
110 return orig(repo.unfiltered(), *args, **kwargs)
112 return orig(repo.unfiltered(), *args, **kwargs)
111 return wrapper
113 return wrapper
112
114
113 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
115 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
114 'unbundle'))
116 'unbundle'))
115 legacycaps = moderncaps.union(set(['changegroupsubset']))
117 legacycaps = moderncaps.union(set(['changegroupsubset']))
116
118
117 class localpeer(peer.peerrepository):
119 class localpeer(peer.peerrepository):
118 '''peer for a local repo; reflects only the most recent API'''
120 '''peer for a local repo; reflects only the most recent API'''
119
121
120 def __init__(self, repo, caps=moderncaps):
122 def __init__(self, repo, caps=moderncaps):
121 peer.peerrepository.__init__(self)
123 peer.peerrepository.__init__(self)
122 self._repo = repo.filtered('served')
124 self._repo = repo.filtered('served')
123 self.ui = repo.ui
125 self.ui = repo.ui
124 self._caps = repo._restrictcapabilities(caps)
126 self._caps = repo._restrictcapabilities(caps)
125 self.requirements = repo.requirements
127 self.requirements = repo.requirements
126 self.supportedformats = repo.supportedformats
128 self.supportedformats = repo.supportedformats
127
129
128 def close(self):
130 def close(self):
129 self._repo.close()
131 self._repo.close()
130
132
131 def _capabilities(self):
133 def _capabilities(self):
132 return self._caps
134 return self._caps
133
135
134 def local(self):
136 def local(self):
135 return self._repo
137 return self._repo
136
138
137 def canpush(self):
139 def canpush(self):
138 return True
140 return True
139
141
140 def url(self):
142 def url(self):
141 return self._repo.url()
143 return self._repo.url()
142
144
143 def lookup(self, key):
145 def lookup(self, key):
144 return self._repo.lookup(key)
146 return self._repo.lookup(key)
145
147
146 def branchmap(self):
148 def branchmap(self):
147 return self._repo.branchmap()
149 return self._repo.branchmap()
148
150
149 def heads(self):
151 def heads(self):
150 return self._repo.heads()
152 return self._repo.heads()
151
153
152 def known(self, nodes):
154 def known(self, nodes):
153 return self._repo.known(nodes)
155 return self._repo.known(nodes)
154
156
155 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
157 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
156 **kwargs):
158 **kwargs):
157 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
159 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
158 common=common, bundlecaps=bundlecaps,
160 common=common, bundlecaps=bundlecaps,
159 **kwargs)
161 **kwargs)
160 cb = util.chunkbuffer(chunks)
162 cb = util.chunkbuffer(chunks)
161
163
162 if bundlecaps is not None and 'HG20' in bundlecaps:
164 if bundlecaps is not None and 'HG20' in bundlecaps:
163 # When requesting a bundle2, getbundle returns a stream to make the
165 # When requesting a bundle2, getbundle returns a stream to make the
164 # wire level function happier. We need to build a proper object
166 # wire level function happier. We need to build a proper object
165 # from it in local peer.
167 # from it in local peer.
166 return bundle2.getunbundler(self.ui, cb)
168 return bundle2.getunbundler(self.ui, cb)
167 else:
169 else:
168 return changegroup.getunbundler('01', cb, None)
170 return changegroup.getunbundler('01', cb, None)
169
171
170 # TODO We might want to move the next two calls into legacypeer and add
172 # TODO We might want to move the next two calls into legacypeer and add
171 # unbundle instead.
173 # unbundle instead.
172
174
173 def unbundle(self, cg, heads, url):
175 def unbundle(self, cg, heads, url):
174 """apply a bundle on a repo
176 """apply a bundle on a repo
175
177
176 This function handles the repo locking itself."""
178 This function handles the repo locking itself."""
177 try:
179 try:
178 try:
180 try:
179 cg = exchange.readbundle(self.ui, cg, None)
181 cg = exchange.readbundle(self.ui, cg, None)
180 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
182 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
181 if util.safehasattr(ret, 'getchunks'):
183 if util.safehasattr(ret, 'getchunks'):
182 # This is a bundle20 object, turn it into an unbundler.
184 # This is a bundle20 object, turn it into an unbundler.
183 # This little dance should be dropped eventually when the
185 # This little dance should be dropped eventually when the
184 # API is finally improved.
186 # API is finally improved.
185 stream = util.chunkbuffer(ret.getchunks())
187 stream = util.chunkbuffer(ret.getchunks())
186 ret = bundle2.getunbundler(self.ui, stream)
188 ret = bundle2.getunbundler(self.ui, stream)
187 return ret
189 return ret
188 except Exception as exc:
190 except Exception as exc:
189 # If the exception contains output salvaged from a bundle2
191 # If the exception contains output salvaged from a bundle2
190 # reply, we need to make sure it is printed before continuing
192 # reply, we need to make sure it is printed before continuing
191 # to fail. So we build a bundle2 with such output and consume
193 # to fail. So we build a bundle2 with such output and consume
192 # it directly.
194 # it directly.
193 #
195 #
194 # This is not very elegant but allows a "simple" solution for
196 # This is not very elegant but allows a "simple" solution for
195 # issue4594
197 # issue4594
196 output = getattr(exc, '_bundle2salvagedoutput', ())
198 output = getattr(exc, '_bundle2salvagedoutput', ())
197 if output:
199 if output:
198 bundler = bundle2.bundle20(self._repo.ui)
200 bundler = bundle2.bundle20(self._repo.ui)
199 for out in output:
201 for out in output:
200 bundler.addpart(out)
202 bundler.addpart(out)
201 stream = util.chunkbuffer(bundler.getchunks())
203 stream = util.chunkbuffer(bundler.getchunks())
202 b = bundle2.getunbundler(self.ui, stream)
204 b = bundle2.getunbundler(self.ui, stream)
203 bundle2.processbundle(self._repo, b)
205 bundle2.processbundle(self._repo, b)
204 raise
206 raise
205 except error.PushRaced as exc:
207 except error.PushRaced as exc:
206 raise error.ResponseError(_('push failed:'), str(exc))
208 raise error.ResponseError(_('push failed:'), str(exc))
207
209
208 def lock(self):
210 def lock(self):
209 return self._repo.lock()
211 return self._repo.lock()
210
212
211 def addchangegroup(self, cg, source, url):
213 def addchangegroup(self, cg, source, url):
212 return cg.apply(self._repo, source, url)
214 return cg.apply(self._repo, source, url)
213
215
214 def pushkey(self, namespace, key, old, new):
216 def pushkey(self, namespace, key, old, new):
215 return self._repo.pushkey(namespace, key, old, new)
217 return self._repo.pushkey(namespace, key, old, new)
216
218
217 def listkeys(self, namespace):
219 def listkeys(self, namespace):
218 return self._repo.listkeys(namespace)
220 return self._repo.listkeys(namespace)
219
221
220 def debugwireargs(self, one, two, three=None, four=None, five=None):
222 def debugwireargs(self, one, two, three=None, four=None, five=None):
221 '''used to test argument passing over the wire'''
223 '''used to test argument passing over the wire'''
222 return "%s %s %s %s %s" % (one, two, three, four, five)
224 return "%s %s %s %s %s" % (one, two, three, four, five)
223
225
224 class locallegacypeer(localpeer):
226 class locallegacypeer(localpeer):
225 '''peer extension which implements legacy methods too; used for tests with
227 '''peer extension which implements legacy methods too; used for tests with
226 restricted capabilities'''
228 restricted capabilities'''
227
229
228 def __init__(self, repo):
230 def __init__(self, repo):
229 localpeer.__init__(self, repo, caps=legacycaps)
231 localpeer.__init__(self, repo, caps=legacycaps)
230
232
231 def branches(self, nodes):
233 def branches(self, nodes):
232 return self._repo.branches(nodes)
234 return self._repo.branches(nodes)
233
235
234 def between(self, pairs):
236 def between(self, pairs):
235 return self._repo.between(pairs)
237 return self._repo.between(pairs)
236
238
237 def changegroup(self, basenodes, source):
239 def changegroup(self, basenodes, source):
238 return changegroup.changegroup(self._repo, basenodes, source)
240 return changegroup.changegroup(self._repo, basenodes, source)
239
241
240 def changegroupsubset(self, bases, heads, source):
242 def changegroupsubset(self, bases, heads, source):
241 return changegroup.changegroupsubset(self._repo, bases, heads, source)
243 return changegroup.changegroupsubset(self._repo, bases, heads, source)
242
244
243 class localrepository(object):
245 class localrepository(object):
244
246
245 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
247 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
246 'manifestv2'))
248 'manifestv2'))
247 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
249 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
248 'relshared', 'dotencode'))
250 'relshared', 'dotencode'))
249 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
251 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
250 filtername = None
252 filtername = None
251
253
252 # a list of (ui, featureset) functions.
254 # a list of (ui, featureset) functions.
253 # only functions defined in module of enabled extensions are invoked
255 # only functions defined in module of enabled extensions are invoked
254 featuresetupfuncs = set()
256 featuresetupfuncs = set()
255
257
256 def __init__(self, baseui, path, create=False):
258 def __init__(self, baseui, path, create=False):
257 self.requirements = set()
259 self.requirements = set()
258 # vfs to access the working copy
260 # vfs to access the working copy
259 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
261 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
260 # vfs to access the content of the repository
262 # vfs to access the content of the repository
261 self.vfs = None
263 self.vfs = None
262 # vfs to access the store part of the repository
264 # vfs to access the store part of the repository
263 self.svfs = None
265 self.svfs = None
264 self.root = self.wvfs.base
266 self.root = self.wvfs.base
265 self.path = self.wvfs.join(".hg")
267 self.path = self.wvfs.join(".hg")
266 self.origroot = path
268 self.origroot = path
267 self.auditor = pathutil.pathauditor(self.root, self._checknested)
269 self.auditor = pathutil.pathauditor(self.root, self._checknested)
268 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
270 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
269 realfs=False)
271 realfs=False)
270 self.vfs = vfsmod.vfs(self.path)
272 self.vfs = vfsmod.vfs(self.path)
271 self.baseui = baseui
273 self.baseui = baseui
272 self.ui = baseui.copy()
274 self.ui = baseui.copy()
273 self.ui.copy = baseui.copy # prevent copying repo configuration
275 self.ui.copy = baseui.copy # prevent copying repo configuration
274 # A list of callback to shape the phase if no data were found.
276 # A list of callback to shape the phase if no data were found.
275 # Callback are in the form: func(repo, roots) --> processed root.
277 # Callback are in the form: func(repo, roots) --> processed root.
276 # This list it to be filled by extension during repo setup
278 # This list it to be filled by extension during repo setup
277 self._phasedefaults = []
279 self._phasedefaults = []
278 try:
280 try:
279 self.ui.readconfig(self.join("hgrc"), self.root)
281 self.ui.readconfig(self.join("hgrc"), self.root)
280 self._loadextensions()
282 self._loadextensions()
281 except IOError:
283 except IOError:
282 pass
284 pass
283
285
284 if self.featuresetupfuncs:
286 if self.featuresetupfuncs:
285 self.supported = set(self._basesupported) # use private copy
287 self.supported = set(self._basesupported) # use private copy
286 extmods = set(m.__name__ for n, m
288 extmods = set(m.__name__ for n, m
287 in extensions.extensions(self.ui))
289 in extensions.extensions(self.ui))
288 for setupfunc in self.featuresetupfuncs:
290 for setupfunc in self.featuresetupfuncs:
289 if setupfunc.__module__ in extmods:
291 if setupfunc.__module__ in extmods:
290 setupfunc(self.ui, self.supported)
292 setupfunc(self.ui, self.supported)
291 else:
293 else:
292 self.supported = self._basesupported
294 self.supported = self._basesupported
293 color.setup(self.ui)
295 color.setup(self.ui)
294
296
295 # Add compression engines.
297 # Add compression engines.
296 for name in util.compengines:
298 for name in util.compengines:
297 engine = util.compengines[name]
299 engine = util.compengines[name]
298 if engine.revlogheader():
300 if engine.revlogheader():
299 self.supported.add('exp-compression-%s' % name)
301 self.supported.add('exp-compression-%s' % name)
300
302
301 if not self.vfs.isdir():
303 if not self.vfs.isdir():
302 if create:
304 if create:
303 self.requirements = newreporequirements(self)
305 self.requirements = newreporequirements(self)
304
306
305 if not self.wvfs.exists():
307 if not self.wvfs.exists():
306 self.wvfs.makedirs()
308 self.wvfs.makedirs()
307 self.vfs.makedir(notindexed=True)
309 self.vfs.makedir(notindexed=True)
308
310
309 if 'store' in self.requirements:
311 if 'store' in self.requirements:
310 self.vfs.mkdir("store")
312 self.vfs.mkdir("store")
311
313
312 # create an invalid changelog
314 # create an invalid changelog
313 self.vfs.append(
315 self.vfs.append(
314 "00changelog.i",
316 "00changelog.i",
315 '\0\0\0\2' # represents revlogv2
317 '\0\0\0\2' # represents revlogv2
316 ' dummy changelog to prevent using the old repo layout'
318 ' dummy changelog to prevent using the old repo layout'
317 )
319 )
318 else:
320 else:
319 raise error.RepoError(_("repository %s not found") % path)
321 raise error.RepoError(_("repository %s not found") % path)
320 elif create:
322 elif create:
321 raise error.RepoError(_("repository %s already exists") % path)
323 raise error.RepoError(_("repository %s already exists") % path)
322 else:
324 else:
323 try:
325 try:
324 self.requirements = scmutil.readrequires(
326 self.requirements = scmutil.readrequires(
325 self.vfs, self.supported)
327 self.vfs, self.supported)
326 except IOError as inst:
328 except IOError as inst:
327 if inst.errno != errno.ENOENT:
329 if inst.errno != errno.ENOENT:
328 raise
330 raise
329
331
330 self.sharedpath = self.path
332 self.sharedpath = self.path
331 try:
333 try:
332 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
334 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
333 if 'relshared' in self.requirements:
335 if 'relshared' in self.requirements:
334 sharedpath = self.vfs.join(sharedpath)
336 sharedpath = self.vfs.join(sharedpath)
335 vfs = vfsmod.vfs(sharedpath, realpath=True)
337 vfs = vfsmod.vfs(sharedpath, realpath=True)
336 s = vfs.base
338 s = vfs.base
337 if not vfs.exists():
339 if not vfs.exists():
338 raise error.RepoError(
340 raise error.RepoError(
339 _('.hg/sharedpath points to nonexistent directory %s') % s)
341 _('.hg/sharedpath points to nonexistent directory %s') % s)
340 self.sharedpath = s
342 self.sharedpath = s
341 except IOError as inst:
343 except IOError as inst:
342 if inst.errno != errno.ENOENT:
344 if inst.errno != errno.ENOENT:
343 raise
345 raise
344
346
345 self.store = store.store(
347 self.store = store.store(
346 self.requirements, self.sharedpath, vfsmod.vfs)
348 self.requirements, self.sharedpath, vfsmod.vfs)
347 self.spath = self.store.path
349 self.spath = self.store.path
348 self.svfs = self.store.vfs
350 self.svfs = self.store.vfs
349 self.sjoin = self.store.join
351 self.sjoin = self.store.join
350 self.vfs.createmode = self.store.createmode
352 self.vfs.createmode = self.store.createmode
351 self._applyopenerreqs()
353 self._applyopenerreqs()
352 if create:
354 if create:
353 self._writerequirements()
355 self._writerequirements()
354
356
355 self._dirstatevalidatewarned = False
357 self._dirstatevalidatewarned = False
356
358
357 self._branchcaches = {}
359 self._branchcaches = {}
358 self._revbranchcache = None
360 self._revbranchcache = None
359 self.filterpats = {}
361 self.filterpats = {}
360 self._datafilters = {}
362 self._datafilters = {}
361 self._transref = self._lockref = self._wlockref = None
363 self._transref = self._lockref = self._wlockref = None
362
364
363 # A cache for various files under .hg/ that tracks file changes,
365 # A cache for various files under .hg/ that tracks file changes,
364 # (used by the filecache decorator)
366 # (used by the filecache decorator)
365 #
367 #
366 # Maps a property name to its util.filecacheentry
368 # Maps a property name to its util.filecacheentry
367 self._filecache = {}
369 self._filecache = {}
368
370
369 # hold sets of revision to be filtered
371 # hold sets of revision to be filtered
370 # should be cleared when something might have changed the filter value:
372 # should be cleared when something might have changed the filter value:
371 # - new changesets,
373 # - new changesets,
372 # - phase change,
374 # - phase change,
373 # - new obsolescence marker,
375 # - new obsolescence marker,
374 # - working directory parent change,
376 # - working directory parent change,
375 # - bookmark changes
377 # - bookmark changes
376 self.filteredrevcache = {}
378 self.filteredrevcache = {}
377
379
378 # generic mapping between names and nodes
380 # generic mapping between names and nodes
379 self.names = namespaces.namespaces()
381 self.names = namespaces.namespaces()
380
382
381 @property
383 @property
382 def wopener(self):
384 def wopener(self):
383 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
385 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
384 return self.wvfs
386 return self.wvfs
385
387
386 @property
388 @property
387 def opener(self):
389 def opener(self):
388 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
390 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
389 return self.vfs
391 return self.vfs
390
392
391 def close(self):
393 def close(self):
392 self._writecaches()
394 self._writecaches()
393
395
394 def _loadextensions(self):
396 def _loadextensions(self):
395 extensions.loadall(self.ui)
397 extensions.loadall(self.ui)
396
398
397 def _writecaches(self):
399 def _writecaches(self):
398 if self._revbranchcache:
400 if self._revbranchcache:
399 self._revbranchcache.write()
401 self._revbranchcache.write()
400
402
401 def _restrictcapabilities(self, caps):
403 def _restrictcapabilities(self, caps):
402 if self.ui.configbool('experimental', 'bundle2-advertise', True):
404 if self.ui.configbool('experimental', 'bundle2-advertise', True):
403 caps = set(caps)
405 caps = set(caps)
404 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
406 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
405 caps.add('bundle2=' + urlreq.quote(capsblob))
407 caps.add('bundle2=' + urlreq.quote(capsblob))
406 return caps
408 return caps
407
409
408 def _applyopenerreqs(self):
410 def _applyopenerreqs(self):
409 self.svfs.options = dict((r, 1) for r in self.requirements
411 self.svfs.options = dict((r, 1) for r in self.requirements
410 if r in self.openerreqs)
412 if r in self.openerreqs)
411 # experimental config: format.chunkcachesize
413 # experimental config: format.chunkcachesize
412 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
414 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
413 if chunkcachesize is not None:
415 if chunkcachesize is not None:
414 self.svfs.options['chunkcachesize'] = chunkcachesize
416 self.svfs.options['chunkcachesize'] = chunkcachesize
415 # experimental config: format.maxchainlen
417 # experimental config: format.maxchainlen
416 maxchainlen = self.ui.configint('format', 'maxchainlen')
418 maxchainlen = self.ui.configint('format', 'maxchainlen')
417 if maxchainlen is not None:
419 if maxchainlen is not None:
418 self.svfs.options['maxchainlen'] = maxchainlen
420 self.svfs.options['maxchainlen'] = maxchainlen
419 # experimental config: format.manifestcachesize
421 # experimental config: format.manifestcachesize
420 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
422 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
421 if manifestcachesize is not None:
423 if manifestcachesize is not None:
422 self.svfs.options['manifestcachesize'] = manifestcachesize
424 self.svfs.options['manifestcachesize'] = manifestcachesize
423 # experimental config: format.aggressivemergedeltas
425 # experimental config: format.aggressivemergedeltas
424 aggressivemergedeltas = self.ui.configbool('format',
426 aggressivemergedeltas = self.ui.configbool('format',
425 'aggressivemergedeltas', False)
427 'aggressivemergedeltas', False)
426 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
428 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
427 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
429 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
428
430
429 for r in self.requirements:
431 for r in self.requirements:
430 if r.startswith('exp-compression-'):
432 if r.startswith('exp-compression-'):
431 self.svfs.options['compengine'] = r[len('exp-compression-'):]
433 self.svfs.options['compengine'] = r[len('exp-compression-'):]
432
434
433 def _writerequirements(self):
435 def _writerequirements(self):
434 scmutil.writerequires(self.vfs, self.requirements)
436 scmutil.writerequires(self.vfs, self.requirements)
435
437
436 def _checknested(self, path):
438 def _checknested(self, path):
437 """Determine if path is a legal nested repository."""
439 """Determine if path is a legal nested repository."""
438 if not path.startswith(self.root):
440 if not path.startswith(self.root):
439 return False
441 return False
440 subpath = path[len(self.root) + 1:]
442 subpath = path[len(self.root) + 1:]
441 normsubpath = util.pconvert(subpath)
443 normsubpath = util.pconvert(subpath)
442
444
443 # XXX: Checking against the current working copy is wrong in
445 # XXX: Checking against the current working copy is wrong in
444 # the sense that it can reject things like
446 # the sense that it can reject things like
445 #
447 #
446 # $ hg cat -r 10 sub/x.txt
448 # $ hg cat -r 10 sub/x.txt
447 #
449 #
448 # if sub/ is no longer a subrepository in the working copy
450 # if sub/ is no longer a subrepository in the working copy
449 # parent revision.
451 # parent revision.
450 #
452 #
451 # However, it can of course also allow things that would have
453 # However, it can of course also allow things that would have
452 # been rejected before, such as the above cat command if sub/
454 # been rejected before, such as the above cat command if sub/
453 # is a subrepository now, but was a normal directory before.
455 # is a subrepository now, but was a normal directory before.
454 # The old path auditor would have rejected by mistake since it
456 # The old path auditor would have rejected by mistake since it
455 # panics when it sees sub/.hg/.
457 # panics when it sees sub/.hg/.
456 #
458 #
457 # All in all, checking against the working copy seems sensible
459 # All in all, checking against the working copy seems sensible
458 # since we want to prevent access to nested repositories on
460 # since we want to prevent access to nested repositories on
459 # the filesystem *now*.
461 # the filesystem *now*.
460 ctx = self[None]
462 ctx = self[None]
461 parts = util.splitpath(subpath)
463 parts = util.splitpath(subpath)
462 while parts:
464 while parts:
463 prefix = '/'.join(parts)
465 prefix = '/'.join(parts)
464 if prefix in ctx.substate:
466 if prefix in ctx.substate:
465 if prefix == normsubpath:
467 if prefix == normsubpath:
466 return True
468 return True
467 else:
469 else:
468 sub = ctx.sub(prefix)
470 sub = ctx.sub(prefix)
469 return sub.checknested(subpath[len(prefix) + 1:])
471 return sub.checknested(subpath[len(prefix) + 1:])
470 else:
472 else:
471 parts.pop()
473 parts.pop()
472 return False
474 return False
473
475
474 def peer(self):
476 def peer(self):
475 return localpeer(self) # not cached to avoid reference cycle
477 return localpeer(self) # not cached to avoid reference cycle
476
478
477 def unfiltered(self):
479 def unfiltered(self):
478 """Return unfiltered version of the repository
480 """Return unfiltered version of the repository
479
481
480 Intended to be overwritten by filtered repo."""
482 Intended to be overwritten by filtered repo."""
481 return self
483 return self
482
484
483 def filtered(self, name):
485 def filtered(self, name):
484 """Return a filtered version of a repository"""
486 """Return a filtered version of a repository"""
485 # build a new class with the mixin and the current class
487 # build a new class with the mixin and the current class
486 # (possibly subclass of the repo)
488 # (possibly subclass of the repo)
487 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
489 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
488 pass
490 pass
489 return filteredrepo(self, name)
491 return filteredrepo(self, name)
490
492
491 @repofilecache('bookmarks', 'bookmarks.current')
493 @repofilecache('bookmarks', 'bookmarks.current')
492 def _bookmarks(self):
494 def _bookmarks(self):
493 return bookmarks.bmstore(self)
495 return bookmarks.bmstore(self)
494
496
495 @property
497 @property
496 def _activebookmark(self):
498 def _activebookmark(self):
497 return self._bookmarks.active
499 return self._bookmarks.active
498
500
499 def bookmarkheads(self, bookmark):
501 def bookmarkheads(self, bookmark):
500 name = bookmark.split('@', 1)[0]
502 name = bookmark.split('@', 1)[0]
501 heads = []
503 heads = []
502 for mark, n in self._bookmarks.iteritems():
504 for mark, n in self._bookmarks.iteritems():
503 if mark.split('@', 1)[0] == name:
505 if mark.split('@', 1)[0] == name:
504 heads.append(n)
506 heads.append(n)
505 return heads
507 return heads
506
508
507 # _phaserevs and _phasesets depend on changelog. what we need is to
509 # _phaserevs and _phasesets depend on changelog. what we need is to
508 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
510 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
509 # can't be easily expressed in filecache mechanism.
511 # can't be easily expressed in filecache mechanism.
510 @storecache('phaseroots', '00changelog.i')
512 @storecache('phaseroots', '00changelog.i')
511 def _phasecache(self):
513 def _phasecache(self):
512 return phases.phasecache(self, self._phasedefaults)
514 return phases.phasecache(self, self._phasedefaults)
513
515
514 @storecache('obsstore')
516 @storecache('obsstore')
515 def obsstore(self):
517 def obsstore(self):
516 # read default format for new obsstore.
518 # read default format for new obsstore.
517 # developer config: format.obsstore-version
519 # developer config: format.obsstore-version
518 defaultformat = self.ui.configint('format', 'obsstore-version', None)
520 defaultformat = self.ui.configint('format', 'obsstore-version', None)
519 # rely on obsstore class default when possible.
521 # rely on obsstore class default when possible.
520 kwargs = {}
522 kwargs = {}
521 if defaultformat is not None:
523 if defaultformat is not None:
522 kwargs['defaultformat'] = defaultformat
524 kwargs['defaultformat'] = defaultformat
523 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
525 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
524 store = obsolete.obsstore(self.svfs, readonly=readonly,
526 store = obsolete.obsstore(self.svfs, readonly=readonly,
525 **kwargs)
527 **kwargs)
526 if store and readonly:
528 if store and readonly:
527 self.ui.warn(
529 self.ui.warn(
528 _('obsolete feature not enabled but %i markers found!\n')
530 _('obsolete feature not enabled but %i markers found!\n')
529 % len(list(store)))
531 % len(list(store)))
530 return store
532 return store
531
533
532 @storecache('00changelog.i')
534 @storecache('00changelog.i')
533 def changelog(self):
535 def changelog(self):
534 c = changelog.changelog(self.svfs)
536 c = changelog.changelog(self.svfs)
535 if txnutil.mayhavepending(self.root):
537 if txnutil.mayhavepending(self.root):
536 c.readpending('00changelog.i.a')
538 c.readpending('00changelog.i.a')
537 return c
539 return c
538
540
539 def _constructmanifest(self):
541 def _constructmanifest(self):
540 # This is a temporary function while we migrate from manifest to
542 # This is a temporary function while we migrate from manifest to
541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
543 # manifestlog. It allows bundlerepo and unionrepo to intercept the
542 # manifest creation.
544 # manifest creation.
543 return manifest.manifestrevlog(self.svfs)
545 return manifest.manifestrevlog(self.svfs)
544
546
545 @storecache('00manifest.i')
547 @storecache('00manifest.i')
546 def manifestlog(self):
548 def manifestlog(self):
547 return manifest.manifestlog(self.svfs, self)
549 return manifest.manifestlog(self.svfs, self)
548
550
549 @repofilecache('dirstate')
551 @repofilecache('dirstate')
550 def dirstate(self):
552 def dirstate(self):
551 return dirstate.dirstate(self.vfs, self.ui, self.root,
553 return dirstate.dirstate(self.vfs, self.ui, self.root,
552 self._dirstatevalidate)
554 self._dirstatevalidate)
553
555
554 def _dirstatevalidate(self, node):
556 def _dirstatevalidate(self, node):
555 try:
557 try:
556 self.changelog.rev(node)
558 self.changelog.rev(node)
557 return node
559 return node
558 except error.LookupError:
560 except error.LookupError:
559 if not self._dirstatevalidatewarned:
561 if not self._dirstatevalidatewarned:
560 self._dirstatevalidatewarned = True
562 self._dirstatevalidatewarned = True
561 self.ui.warn(_("warning: ignoring unknown"
563 self.ui.warn(_("warning: ignoring unknown"
562 " working parent %s!\n") % short(node))
564 " working parent %s!\n") % short(node))
563 return nullid
565 return nullid
564
566
565 def __getitem__(self, changeid):
567 def __getitem__(self, changeid):
566 if changeid is None or changeid == wdirrev:
568 if changeid is None or changeid == wdirrev:
567 return context.workingctx(self)
569 return context.workingctx(self)
568 if isinstance(changeid, slice):
570 if isinstance(changeid, slice):
569 return [context.changectx(self, i)
571 return [context.changectx(self, i)
570 for i in xrange(*changeid.indices(len(self)))
572 for i in xrange(*changeid.indices(len(self)))
571 if i not in self.changelog.filteredrevs]
573 if i not in self.changelog.filteredrevs]
572 return context.changectx(self, changeid)
574 return context.changectx(self, changeid)
573
575
574 def __contains__(self, changeid):
576 def __contains__(self, changeid):
575 try:
577 try:
576 self[changeid]
578 self[changeid]
577 return True
579 return True
578 except error.RepoLookupError:
580 except error.RepoLookupError:
579 return False
581 return False
580
582
581 def __nonzero__(self):
583 def __nonzero__(self):
582 return True
584 return True
583
585
584 def __len__(self):
586 def __len__(self):
585 return len(self.changelog)
587 return len(self.changelog)
586
588
587 def __iter__(self):
589 def __iter__(self):
588 return iter(self.changelog)
590 return iter(self.changelog)
589
591
590 def revs(self, expr, *args):
592 def revs(self, expr, *args):
591 '''Find revisions matching a revset.
593 '''Find revisions matching a revset.
592
594
593 The revset is specified as a string ``expr`` that may contain
595 The revset is specified as a string ``expr`` that may contain
594 %-formatting to escape certain types. See ``revsetlang.formatspec``.
596 %-formatting to escape certain types. See ``revsetlang.formatspec``.
595
597
596 Revset aliases from the configuration are not expanded. To expand
598 Revset aliases from the configuration are not expanded. To expand
597 user aliases, consider calling ``scmutil.revrange()`` or
599 user aliases, consider calling ``scmutil.revrange()`` or
598 ``repo.anyrevs([expr], user=True)``.
600 ``repo.anyrevs([expr], user=True)``.
599
601
600 Returns a revset.abstractsmartset, which is a list-like interface
602 Returns a revset.abstractsmartset, which is a list-like interface
601 that contains integer revisions.
603 that contains integer revisions.
602 '''
604 '''
603 expr = revsetlang.formatspec(expr, *args)
605 expr = revsetlang.formatspec(expr, *args)
604 m = revset.match(None, expr)
606 m = revset.match(None, expr)
605 return m(self)
607 return m(self)
606
608
607 def set(self, expr, *args):
609 def set(self, expr, *args):
608 '''Find revisions matching a revset and emit changectx instances.
610 '''Find revisions matching a revset and emit changectx instances.
609
611
610 This is a convenience wrapper around ``revs()`` that iterates the
612 This is a convenience wrapper around ``revs()`` that iterates the
611 result and is a generator of changectx instances.
613 result and is a generator of changectx instances.
612
614
613 Revset aliases from the configuration are not expanded. To expand
615 Revset aliases from the configuration are not expanded. To expand
614 user aliases, consider calling ``scmutil.revrange()``.
616 user aliases, consider calling ``scmutil.revrange()``.
615 '''
617 '''
616 for r in self.revs(expr, *args):
618 for r in self.revs(expr, *args):
617 yield self[r]
619 yield self[r]
618
620
619 def anyrevs(self, specs, user=False):
621 def anyrevs(self, specs, user=False):
620 '''Find revisions matching one of the given revsets.
622 '''Find revisions matching one of the given revsets.
621
623
622 Revset aliases from the configuration are not expanded by default. To
624 Revset aliases from the configuration are not expanded by default. To
623 expand user aliases, specify ``user=True``.
625 expand user aliases, specify ``user=True``.
624 '''
626 '''
625 if user:
627 if user:
626 m = revset.matchany(self.ui, specs, repo=self)
628 m = revset.matchany(self.ui, specs, repo=self)
627 else:
629 else:
628 m = revset.matchany(None, specs)
630 m = revset.matchany(None, specs)
629 return m(self)
631 return m(self)
630
632
631 def url(self):
633 def url(self):
632 return 'file:' + self.root
634 return 'file:' + self.root
633
635
634 def hook(self, name, throw=False, **args):
636 def hook(self, name, throw=False, **args):
635 """Call a hook, passing this repo instance.
637 """Call a hook, passing this repo instance.
636
638
637 This a convenience method to aid invoking hooks. Extensions likely
639 This a convenience method to aid invoking hooks. Extensions likely
638 won't call this unless they have registered a custom hook or are
640 won't call this unless they have registered a custom hook or are
639 replacing code that is expected to call a hook.
641 replacing code that is expected to call a hook.
640 """
642 """
641 return hook.hook(self.ui, self, name, throw, **args)
643 return hook.hook(self.ui, self, name, throw, **args)
642
644
643 @unfilteredmethod
645 @unfilteredmethod
644 def _tag(self, names, node, message, local, user, date, extra=None,
646 def _tag(self, names, node, message, local, user, date, extra=None,
645 editor=False):
647 editor=False):
646 if isinstance(names, str):
648 if isinstance(names, str):
647 names = (names,)
649 names = (names,)
648
650
649 branches = self.branchmap()
651 branches = self.branchmap()
650 for name in names:
652 for name in names:
651 self.hook('pretag', throw=True, node=hex(node), tag=name,
653 self.hook('pretag', throw=True, node=hex(node), tag=name,
652 local=local)
654 local=local)
653 if name in branches:
655 if name in branches:
654 self.ui.warn(_("warning: tag %s conflicts with existing"
656 self.ui.warn(_("warning: tag %s conflicts with existing"
655 " branch name\n") % name)
657 " branch name\n") % name)
656
658
657 def writetags(fp, names, munge, prevtags):
659 def writetags(fp, names, munge, prevtags):
658 fp.seek(0, 2)
660 fp.seek(0, 2)
659 if prevtags and prevtags[-1] != '\n':
661 if prevtags and prevtags[-1] != '\n':
660 fp.write('\n')
662 fp.write('\n')
661 for name in names:
663 for name in names:
662 if munge:
664 if munge:
663 m = munge(name)
665 m = munge(name)
664 else:
666 else:
665 m = name
667 m = name
666
668
667 if (self._tagscache.tagtypes and
669 if (self._tagscache.tagtypes and
668 name in self._tagscache.tagtypes):
670 name in self._tagscache.tagtypes):
669 old = self.tags().get(name, nullid)
671 old = self.tags().get(name, nullid)
670 fp.write('%s %s\n' % (hex(old), m))
672 fp.write('%s %s\n' % (hex(old), m))
671 fp.write('%s %s\n' % (hex(node), m))
673 fp.write('%s %s\n' % (hex(node), m))
672 fp.close()
674 fp.close()
673
675
674 prevtags = ''
676 prevtags = ''
675 if local:
677 if local:
676 try:
678 try:
677 fp = self.vfs('localtags', 'r+')
679 fp = self.vfs('localtags', 'r+')
678 except IOError:
680 except IOError:
679 fp = self.vfs('localtags', 'a')
681 fp = self.vfs('localtags', 'a')
680 else:
682 else:
681 prevtags = fp.read()
683 prevtags = fp.read()
682
684
683 # local tags are stored in the current charset
685 # local tags are stored in the current charset
684 writetags(fp, names, None, prevtags)
686 writetags(fp, names, None, prevtags)
685 for name in names:
687 for name in names:
686 self.hook('tag', node=hex(node), tag=name, local=local)
688 self.hook('tag', node=hex(node), tag=name, local=local)
687 return
689 return
688
690
689 try:
691 try:
690 fp = self.wfile('.hgtags', 'rb+')
692 fp = self.wfile('.hgtags', 'rb+')
691 except IOError as e:
693 except IOError as e:
692 if e.errno != errno.ENOENT:
694 if e.errno != errno.ENOENT:
693 raise
695 raise
694 fp = self.wfile('.hgtags', 'ab')
696 fp = self.wfile('.hgtags', 'ab')
695 else:
697 else:
696 prevtags = fp.read()
698 prevtags = fp.read()
697
699
698 # committed tags are stored in UTF-8
700 # committed tags are stored in UTF-8
699 writetags(fp, names, encoding.fromlocal, prevtags)
701 writetags(fp, names, encoding.fromlocal, prevtags)
700
702
701 fp.close()
703 fp.close()
702
704
703 self.invalidatecaches()
705 self.invalidatecaches()
704
706
705 if '.hgtags' not in self.dirstate:
707 if '.hgtags' not in self.dirstate:
706 self[None].add(['.hgtags'])
708 self[None].add(['.hgtags'])
707
709
708 m = matchmod.exact(self.root, '', ['.hgtags'])
710 m = matchmod.exact(self.root, '', ['.hgtags'])
709 tagnode = self.commit(message, user, date, extra=extra, match=m,
711 tagnode = self.commit(message, user, date, extra=extra, match=m,
710 editor=editor)
712 editor=editor)
711
713
712 for name in names:
714 for name in names:
713 self.hook('tag', node=hex(node), tag=name, local=local)
715 self.hook('tag', node=hex(node), tag=name, local=local)
714
716
715 return tagnode
717 return tagnode
716
718
717 def tag(self, names, node, message, local, user, date, editor=False):
719 def tag(self, names, node, message, local, user, date, editor=False):
718 '''tag a revision with one or more symbolic names.
720 '''tag a revision with one or more symbolic names.
719
721
720 names is a list of strings or, when adding a single tag, names may be a
722 names is a list of strings or, when adding a single tag, names may be a
721 string.
723 string.
722
724
723 if local is True, the tags are stored in a per-repository file.
725 if local is True, the tags are stored in a per-repository file.
724 otherwise, they are stored in the .hgtags file, and a new
726 otherwise, they are stored in the .hgtags file, and a new
725 changeset is committed with the change.
727 changeset is committed with the change.
726
728
727 keyword arguments:
729 keyword arguments:
728
730
729 local: whether to store tags in non-version-controlled file
731 local: whether to store tags in non-version-controlled file
730 (default False)
732 (default False)
731
733
732 message: commit message to use if committing
734 message: commit message to use if committing
733
735
734 user: name of user to use if committing
736 user: name of user to use if committing
735
737
736 date: date tuple to use if committing'''
738 date: date tuple to use if committing'''
737
739
738 if not local:
740 if not local:
739 m = matchmod.exact(self.root, '', ['.hgtags'])
741 m = matchmod.exact(self.root, '', ['.hgtags'])
740 if any(self.status(match=m, unknown=True, ignored=True)):
742 if any(self.status(match=m, unknown=True, ignored=True)):
741 raise error.Abort(_('working copy of .hgtags is changed'),
743 raise error.Abort(_('working copy of .hgtags is changed'),
742 hint=_('please commit .hgtags manually'))
744 hint=_('please commit .hgtags manually'))
743
745
744 self.tags() # instantiate the cache
746 self.tags() # instantiate the cache
745 self._tag(names, node, message, local, user, date, editor=editor)
747 self._tag(names, node, message, local, user, date, editor=editor)
746
748
747 @filteredpropertycache
749 @filteredpropertycache
748 def _tagscache(self):
750 def _tagscache(self):
749 '''Returns a tagscache object that contains various tags related
751 '''Returns a tagscache object that contains various tags related
750 caches.'''
752 caches.'''
751
753
752 # This simplifies its cache management by having one decorated
754 # This simplifies its cache management by having one decorated
753 # function (this one) and the rest simply fetch things from it.
755 # function (this one) and the rest simply fetch things from it.
754 class tagscache(object):
756 class tagscache(object):
755 def __init__(self):
757 def __init__(self):
756 # These two define the set of tags for this repository. tags
758 # These two define the set of tags for this repository. tags
757 # maps tag name to node; tagtypes maps tag name to 'global' or
759 # maps tag name to node; tagtypes maps tag name to 'global' or
758 # 'local'. (Global tags are defined by .hgtags across all
760 # 'local'. (Global tags are defined by .hgtags across all
759 # heads, and local tags are defined in .hg/localtags.)
761 # heads, and local tags are defined in .hg/localtags.)
760 # They constitute the in-memory cache of tags.
762 # They constitute the in-memory cache of tags.
761 self.tags = self.tagtypes = None
763 self.tags = self.tagtypes = None
762
764
763 self.nodetagscache = self.tagslist = None
765 self.nodetagscache = self.tagslist = None
764
766
765 cache = tagscache()
767 cache = tagscache()
766 cache.tags, cache.tagtypes = self._findtags()
768 cache.tags, cache.tagtypes = self._findtags()
767
769
768 return cache
770 return cache
769
771
770 def tags(self):
772 def tags(self):
771 '''return a mapping of tag to node'''
773 '''return a mapping of tag to node'''
772 t = {}
774 t = {}
773 if self.changelog.filteredrevs:
775 if self.changelog.filteredrevs:
774 tags, tt = self._findtags()
776 tags, tt = self._findtags()
775 else:
777 else:
776 tags = self._tagscache.tags
778 tags = self._tagscache.tags
777 for k, v in tags.iteritems():
779 for k, v in tags.iteritems():
778 try:
780 try:
779 # ignore tags to unknown nodes
781 # ignore tags to unknown nodes
780 self.changelog.rev(v)
782 self.changelog.rev(v)
781 t[k] = v
783 t[k] = v
782 except (error.LookupError, ValueError):
784 except (error.LookupError, ValueError):
783 pass
785 pass
784 return t
786 return t
785
787
786 def _findtags(self):
788 def _findtags(self):
787 '''Do the hard work of finding tags. Return a pair of dicts
789 '''Do the hard work of finding tags. Return a pair of dicts
788 (tags, tagtypes) where tags maps tag name to node, and tagtypes
790 (tags, tagtypes) where tags maps tag name to node, and tagtypes
789 maps tag name to a string like \'global\' or \'local\'.
791 maps tag name to a string like \'global\' or \'local\'.
790 Subclasses or extensions are free to add their own tags, but
792 Subclasses or extensions are free to add their own tags, but
791 should be aware that the returned dicts will be retained for the
793 should be aware that the returned dicts will be retained for the
792 duration of the localrepo object.'''
794 duration of the localrepo object.'''
793
795
794 # XXX what tagtype should subclasses/extensions use? Currently
796 # XXX what tagtype should subclasses/extensions use? Currently
795 # mq and bookmarks add tags, but do not set the tagtype at all.
797 # mq and bookmarks add tags, but do not set the tagtype at all.
796 # Should each extension invent its own tag type? Should there
798 # Should each extension invent its own tag type? Should there
797 # be one tagtype for all such "virtual" tags? Or is the status
799 # be one tagtype for all such "virtual" tags? Or is the status
798 # quo fine?
800 # quo fine?
799
801
800 alltags = {} # map tag name to (node, hist)
802 alltags = {} # map tag name to (node, hist)
801 tagtypes = {}
803 tagtypes = {}
802
804
803 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
805 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
804 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
806 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
805
807
806 # Build the return dicts. Have to re-encode tag names because
808 # Build the return dicts. Have to re-encode tag names because
807 # the tags module always uses UTF-8 (in order not to lose info
809 # the tags module always uses UTF-8 (in order not to lose info
808 # writing to the cache), but the rest of Mercurial wants them in
810 # writing to the cache), but the rest of Mercurial wants them in
809 # local encoding.
811 # local encoding.
810 tags = {}
812 tags = {}
811 for (name, (node, hist)) in alltags.iteritems():
813 for (name, (node, hist)) in alltags.iteritems():
812 if node != nullid:
814 if node != nullid:
813 tags[encoding.tolocal(name)] = node
815 tags[encoding.tolocal(name)] = node
814 tags['tip'] = self.changelog.tip()
816 tags['tip'] = self.changelog.tip()
815 tagtypes = dict([(encoding.tolocal(name), value)
817 tagtypes = dict([(encoding.tolocal(name), value)
816 for (name, value) in tagtypes.iteritems()])
818 for (name, value) in tagtypes.iteritems()])
817 return (tags, tagtypes)
819 return (tags, tagtypes)
818
820
819 def tagtype(self, tagname):
821 def tagtype(self, tagname):
820 '''
822 '''
821 return the type of the given tag. result can be:
823 return the type of the given tag. result can be:
822
824
823 'local' : a local tag
825 'local' : a local tag
824 'global' : a global tag
826 'global' : a global tag
825 None : tag does not exist
827 None : tag does not exist
826 '''
828 '''
827
829
828 return self._tagscache.tagtypes.get(tagname)
830 return self._tagscache.tagtypes.get(tagname)
829
831
830 def tagslist(self):
832 def tagslist(self):
831 '''return a list of tags ordered by revision'''
833 '''return a list of tags ordered by revision'''
832 if not self._tagscache.tagslist:
834 if not self._tagscache.tagslist:
833 l = []
835 l = []
834 for t, n in self.tags().iteritems():
836 for t, n in self.tags().iteritems():
835 l.append((self.changelog.rev(n), t, n))
837 l.append((self.changelog.rev(n), t, n))
836 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
838 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
837
839
838 return self._tagscache.tagslist
840 return self._tagscache.tagslist
839
841
840 def nodetags(self, node):
842 def nodetags(self, node):
841 '''return the tags associated with a node'''
843 '''return the tags associated with a node'''
842 if not self._tagscache.nodetagscache:
844 if not self._tagscache.nodetagscache:
843 nodetagscache = {}
845 nodetagscache = {}
844 for t, n in self._tagscache.tags.iteritems():
846 for t, n in self._tagscache.tags.iteritems():
845 nodetagscache.setdefault(n, []).append(t)
847 nodetagscache.setdefault(n, []).append(t)
846 for tags in nodetagscache.itervalues():
848 for tags in nodetagscache.itervalues():
847 tags.sort()
849 tags.sort()
848 self._tagscache.nodetagscache = nodetagscache
850 self._tagscache.nodetagscache = nodetagscache
849 return self._tagscache.nodetagscache.get(node, [])
851 return self._tagscache.nodetagscache.get(node, [])
850
852
851 def nodebookmarks(self, node):
853 def nodebookmarks(self, node):
852 """return the list of bookmarks pointing to the specified node"""
854 """return the list of bookmarks pointing to the specified node"""
853 marks = []
855 marks = []
854 for bookmark, n in self._bookmarks.iteritems():
856 for bookmark, n in self._bookmarks.iteritems():
855 if n == node:
857 if n == node:
856 marks.append(bookmark)
858 marks.append(bookmark)
857 return sorted(marks)
859 return sorted(marks)
858
860
859 def branchmap(self):
861 def branchmap(self):
860 '''returns a dictionary {branch: [branchheads]} with branchheads
862 '''returns a dictionary {branch: [branchheads]} with branchheads
861 ordered by increasing revision number'''
863 ordered by increasing revision number'''
862 branchmap.updatecache(self)
864 branchmap.updatecache(self)
863 return self._branchcaches[self.filtername]
865 return self._branchcaches[self.filtername]
864
866
865 @unfilteredmethod
867 @unfilteredmethod
866 def revbranchcache(self):
868 def revbranchcache(self):
867 if not self._revbranchcache:
869 if not self._revbranchcache:
868 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
870 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
869 return self._revbranchcache
871 return self._revbranchcache
870
872
871 def branchtip(self, branch, ignoremissing=False):
873 def branchtip(self, branch, ignoremissing=False):
872 '''return the tip node for a given branch
874 '''return the tip node for a given branch
873
875
874 If ignoremissing is True, then this method will not raise an error.
876 If ignoremissing is True, then this method will not raise an error.
875 This is helpful for callers that only expect None for a missing branch
877 This is helpful for callers that only expect None for a missing branch
876 (e.g. namespace).
878 (e.g. namespace).
877
879
878 '''
880 '''
879 try:
881 try:
880 return self.branchmap().branchtip(branch)
882 return self.branchmap().branchtip(branch)
881 except KeyError:
883 except KeyError:
882 if not ignoremissing:
884 if not ignoremissing:
883 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
885 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
884 else:
886 else:
885 pass
887 pass
886
888
887 def lookup(self, key):
889 def lookup(self, key):
888 return self[key].node()
890 return self[key].node()
889
891
890 def lookupbranch(self, key, remote=None):
892 def lookupbranch(self, key, remote=None):
891 repo = remote or self
893 repo = remote or self
892 if key in repo.branchmap():
894 if key in repo.branchmap():
893 return key
895 return key
894
896
895 repo = (remote and remote.local()) and remote or self
897 repo = (remote and remote.local()) and remote or self
896 return repo[key].branch()
898 return repo[key].branch()
897
899
898 def known(self, nodes):
900 def known(self, nodes):
899 cl = self.changelog
901 cl = self.changelog
900 nm = cl.nodemap
902 nm = cl.nodemap
901 filtered = cl.filteredrevs
903 filtered = cl.filteredrevs
902 result = []
904 result = []
903 for n in nodes:
905 for n in nodes:
904 r = nm.get(n)
906 r = nm.get(n)
905 resp = not (r is None or r in filtered)
907 resp = not (r is None or r in filtered)
906 result.append(resp)
908 result.append(resp)
907 return result
909 return result
908
910
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

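    # Editor's sketch: _loadfilter() above is driven by the [encode] and
    # [decode] hgrc sections. A hypothetical configuration (pattern and
    # commands are illustrative assumptions, not recommendations):
    #
    #     [encode]
    #     **.txt = dos2unix
    #     [decode]
    #     **.txt = unix2dos
    #
    # A command of '!' disables filtering for its pattern, matching the
    # "cmd == '!'" check in _loadfilter().
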
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be invoked explicitly here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

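    # Editor's sketch (not in the original file) of the calling convention
    # transaction() enforces: the store lock must already be held, and
    # nested calls just return tr.nest(). Names below are assumptions:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...          # mutate the store
    #             tr.close()   # finalize; runs pretxnclose/txnclose hooks
    #         finally:
    #             tr.release() # rolls back if close() was never reached
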
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

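    # Editor's note, illustrative only: _afterlock() is how commit() below
    # defers its 'commit' hook until every lock is dropped, e.g.:
    #
    #     def callback():
    #         ...  # runs once the outermost lock is released
    #     repo._afterlock(callback)
    #
    # If no lock is currently held, the callback runs immediately.
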
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

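    # Editor's sketch (not original code) of the lock-ordering rule both
    # docstrings above insist on -- wlock before lock; acquiring them the
    # other way round trips the devel 'check-locks' warning:
    #
    #     with repo.wlock():     # working-copy lock first
    #         with repo.lock():  # then the store lock
    #             ...            # safe to touch dirstate and store
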
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook
            # is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

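    # Editor's sketch (not part of the original module): commit() acquires
    # wlock/lock itself, so a minimal caller, under these assumptions, is:
    #
    #     node = repo.commit(text='example message', user='alice')
    #     # 'node' is the new changeset's binary id, or None when there was
    #     # nothing to commit (see the ui.allowemptycommit logic above).
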
1727 @unfilteredmethod
1729 @unfilteredmethod
1728 def commitctx(self, ctx, error=False):
1730 def commitctx(self, ctx, error=False):
1729 """Add a new revision to current repository.
1731 """Add a new revision to current repository.
1730 Revision information is passed via the context argument.
1732 Revision information is passed via the context argument.
1731 """
1733 """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase is 0, we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

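    # Illustrative sketch (not part of the original file): commitctx is the
    # low-level entry point also used by in-memory commits. Assuming a repo
    # object and mercurial's context module, a caller might build a memctx
    # and hand it to commitctx roughly like this (`makefilectx` and the file
    # content are hypothetical, and the memfilectx signature varies between
    # mercurial versions):
    #
    #   from mercurial import context
    #
    #   def makefilectx(repo, memctx, path):
    #       # return a memfilectx carrying the new content for `path`
    #       return context.memfilectx(repo, path, 'new content\n')
    #
    #   mctx = context.memctx(repo, (repo['.'].node(), None),
    #                         'example commit message',
    #                         ['a.txt'], makefilectx,
    #                         user='alice', date='0 0')
    #   node = repo.commitctx(mctx)
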
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or to
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

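    # Illustrative sketch (not part of the original file): walk() pairs with
    # the match module. Assuming a repo object, listing the tracked *.py
    # files in the working directory might look like:
    #
    #   from mercurial import match as matchmod
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write('%s\n' % f)
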
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

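    # Illustrative sketch (not part of the original file): the returned
    # status object behaves like a tuple with named fields (modified, added,
    # removed, deleted, unknown, ignored, clean). Assuming a repo object,
    # comparing the working directory against '.' might look like:
    #
    #   st = repo.status(ignored=True, clean=True, unknown=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
    #   for f in st.unknown:
    #       repo.ui.write('? %s\n' % f)
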
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

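    # Illustrative sketch (not part of the original file): assuming a repo
    # object, fetching the open heads of the 'default' branch, newest first,
    # might look like:
    #
    #   from mercurial.node import short
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(h))
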
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

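    # Worked example (not part of the original file): for each (top, bottom)
    # pair, between() walks first parents from top towards bottom and samples
    # the node whenever the step counter i equals f, with f doubling after
    # each hit, i.e. the nodes at distance 1, 2, 4, 8, ... from top. With a
    # linear history ... -> r10 -> ... -> r20 and the pair (r20, r10), the
    # returned list for that pair would be the nodes of r19, r18, r16 and
    # r12: an exponentially spaced skeleton of the range, as used by the old
    # wire-protocol discovery.
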
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered hooks are called with
        a pushop (carrying repo, remote and outgoing) before changesets are
        pushed.
        """
        return util.hooks()

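    # Illustrative sketch (not part of the original file): an extension could
    # register a pre-push check roughly like this (`checkheads` is a
    # hypothetical callback name):
    #
    #   def checkheads(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           raise error.Abort('refusing to push more than 100 changesets')
    #
    #   repo.prepushoutgoinghooks.add('myextension', checkheads)
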
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

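    # Illustrative sketch (not part of the original file): the pushkey
    # protocol moves simple key/value data such as bookmarks. Assuming a
    # repo object and a binary node `newnode`, creating a bookmark might
    # look like (old='' means "no previous value"):
    #
    #   from mercurial.node import hex
    #
    #   ok = repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
    #   if not ok:
    #       repo.ui.warn('bookmark was not updated\n')
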
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

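    # Illustrative sketch (not part of the original file): listkeys returns
    # a plain dict. For the 'bookmarks' namespace the keys are bookmark
    # names and the values hex nodes, so enumerating them might look like:
    #
    #   for name, hexnode in sorted(repo.listkeys('bookmarks').items()):
    #       repo.ui.write('%s -> %s\n' % (name, hexnode))
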
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

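    # Illustrative sketch (not part of the original file): callers use the
    # returned path to tell the user where an aborted commit message went,
    # mirroring the commit() error path above:
    #
    #   msgfn = repo.savecommitmessage('WIP: draft message\n')
    #   repo.ui.write(_('note: commit message saved in %s\n') % msgfn)
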
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                # if src and dest refer to the same file, vfs.rename is a
                # no-op, leaving both src and dest on disk. delete dest to
                # make sure the rename cannot be such a no-op.
                vfs.unlink(dest)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

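# Worked example (not part of the original file): undoname maps a
# transaction journal file to its post-transaction undo counterpart:
#
#   undoname('.hg/store/journal')             # -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
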
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
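
# Illustrative sketch (not part of the original file): an extension can wrap
# newreporequirements to inject its own requirement; the wrapper and the
# 'exp-myfeature' requirement name here are hypothetical:
#
#   from mercurial import extensions, localrepo
#
#   def wrapper(orig, repo):
#       requirements = orig(repo)
#       requirements.add('exp-myfeature')
#       return requirements
#
#   extensions.wrapfunction(localrepo, 'newreporequirements', wrapper)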