localrepo: move filtername to __init__...
Gregory Szorc
r32730:b8ff7d0f default
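
The change itself is one line in each direction: filtername stops being a class-level default on localrepository and becomes an instance attribute assigned in __init__. A minimal sketch of the difference (illustrative only, not code from this changeset):

    class Before(object):
        filtername = None              # class attribute, shared default

    class After(object):
        def __init__(self):
            self.filtername = None     # instance attribute, set per repo

Reads through self.filtername behave the same either way; assigning it in __init__ simply makes the attribute part of each instance's explicit initialization, alongside the other per-repository state set up below.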
@@ -1,2075 +1,2075 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def join(self, obj, fname):
        return obj.vfs.join(fname)
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
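
# Illustration (not part of the original file): repofilecache and
# unfilteredmethod both redirect to repo.unfiltered(), so decorated logic
# sees every revision no matter which repoview it is invoked on. A
# hypothetical extension helper might look like:
#
#     @unfilteredmethod
#     def allrevcount(repo):
#         return len(repo.changelog)  # counts hidden revisions too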

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

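# Illustration (not part of the original file): localpeer is what
# localrepository.peer() (defined below) hands back; because __init__ wraps
# repo.filtered('served'), callers of the peer API never observe revisions
# that the 'served' view hides.
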
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }
-    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
+        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

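    # Illustration (not part of the original file): callers normally reach
    # this constructor through mercurial.hg.repository() rather than by
    # instantiating localrepository directly, roughly:
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui.load(), '/path/to/repo')
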
    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)
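
    # Illustration (not part of the original file): because filteredrepo is
    # built from the *current* class, subclasses keep their behavior under
    # filtering; e.g. repo.filtered('served') (as used by localpeer above)
    # acts like repo minus the revisions the 'served' view hides.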

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
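
    # Illustration (not part of the original file): __getitem__ accepts
    # several key shapes, e.g. repo[None] for the working directory context,
    # repo['tip'] or repo[0] for a changectx, and repo[0:5] for a slice that
    # skips filtered revisions.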

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
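
    # Illustration (not part of the original file): with the %-escapes of
    # revsetlang.formatspec this can be called as, e.g.:
    #
    #     repo.revs('limit(all(), %d)', 10)   # %d escapes an integer
    #     repo.revs('%ln::', somenodes)       # %ln escapes a list of nodes
    #                                         # (somenodes is hypothetical)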

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
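
    # Illustration (not part of the original file): _branchcaches is keyed
    # by self.filtername (None on the unfiltered repo, or a view name such
    # as 'visible' or 'served'), i.e. the attribute this changeset moves
    # into __init__.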

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
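
    # Illustration (not part of the original file): filterpats are built by
    # _loadfilter from hgrc [encode]/[decode] sections mapping a file pattern
    # to a shell command or to a data filter registered via adddatafilter
    # below, e.g. the win32text extension's
    #
    #     [encode]
    #     ** = cleverencode: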
905
905
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

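    # Illustrative sketch (not part of the original module): how the filter
    # layer pairs up on read and write. Assumes `repo` is a localrepository;
    # the filenames are hypothetical.
    #
    #   data = repo.wread('doc.txt')          # applies [encode] filters
    #   repo.wwrite('doc.txt', data, '')      # applies [decode] filters
    #   repo.wwrite('bin/tool', data, 'x')    # 'x' also sets the exec bit
    #   repo.wwrite('alias', 'target', 'l')   # 'l' writes a symlink instead
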
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective, so we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed, or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)
        tr.changes['revs'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

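    # Illustrative sketch (not part of the original module): an in-process
    # txnclose hook consuming the experimental tags.changes file documented
    # above. The hook body and its keyword arguments are an assumption built
    # from the format description, not code shipped with Mercurial.
    #
    #   def tagmovehook(ui, repo, **kwargs):
    #       if kwargs.get('tag_moved') != '1':
    #           return
    #       for line in repo.vfs('changes/tags.changes').read().splitlines():
    #           action, hexnode, tagname = line.split(' ', 2)
    #           ui.write('%s %s (node %s)\n' % (action, tagname, hexnode))
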
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

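    # Illustrative note (not part of the original module): this method backs
    # the 'hg recover' command, which users run after an interrupted
    # transaction leaves a journal behind:
    #
    #   $ hg recover
    #   rolling back interrupted transaction
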
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

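    # Illustrative sketch (not part of the original module): an extension
    # augmenting the cache-warming logic, as _buildcacheupdater's docstring
    # invites. The wrapper and the extra cache it rebuilds are hypothetical.
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _updatecaches(orig, repo, tr=None):
    #       orig(repo, tr)
    #       repo._myextensioncache.rebuild()   # hypothetical cache
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo.localrepository,
    #                               'updatecaches', _updatecaches)
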
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

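    # Illustrative sketch (not part of the original module): deferring work
    # until every lock is dropped, as commit() does with its 'commit' hook.
    # The callback body is hypothetical.
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #
    #   with repo.wlock(), repo.lock():
    #       ...                       # mutate the repository
    #       repo._afterlock(notify)   # runs once both locks are released
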
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

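    # Illustrative sketch (not part of the original module): the lock
    # ordering both docstrings above require, wlock before lock, combined
    # with a transaction as lock()'s docstring suggests.
    #
    #   with repo.wlock():          # working copy lock first
    #       with repo.lock():       # then the store lock
    #           tr = repo.transaction('my-operation')
    #           try:
    #               ...             # mutate the store
    #               tr.close()
    #           finally:
    #               tr.release()
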
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

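    # Illustrative note (not part of the original module): when copy
    # information is recorded above, the filelog metadata for the new
    # revision looks roughly like this (values hypothetical):
    #
    #   meta = {
    #       'copy': 'foo',           # source path of the copy/rename
    #       'copyrev': hex(crev),    # 40-char hex filelog node of the source
    #   }
    #
    # with fparent1 set to nullid so readers know to consult the copy data.
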
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

1566 @unfilteredmethod
1566 @unfilteredmethod
1567 def commit(self, text="", user=None, date=None, match=None, force=False,
1567 def commit(self, text="", user=None, date=None, match=None, force=False,
1568 editor=False, extra=None):
1568 editor=False, extra=None):
1569 """Add a new revision to current repository.
1569 """Add a new revision to current repository.
1570
1570
1571 Revision information is gathered from the working directory,
1571 Revision information is gathered from the working directory,
1572 match can be used to filter the committed files. If editor is
1572 match can be used to filter the committed files. If editor is
1573 supplied, it is called to get a commit message.
1573 supplied, it is called to get a commit message.
1574 """
1574 """
1575 if extra is None:
1575 if extra is None:
1576 extra = {}
1576 extra = {}
1577
1577
1578 def fail(f, msg):
1578 def fail(f, msg):
1579 raise error.Abort('%s: %s' % (f, msg))
1579 raise error.Abort('%s: %s' % (f, msg))
1580
1580
1581 if not match:
1581 if not match:
1582 match = matchmod.always(self.root, '')
1582 match = matchmod.always(self.root, '')
1583
1583
1584 if not force:
1584 if not force:
1585 vdirs = []
1585 vdirs = []
1586 match.explicitdir = vdirs.append
1586 match.explicitdir = vdirs.append
1587 match.bad = fail
1587 match.bad = fail
1588
1588
1589 wlock = lock = tr = None
1589 wlock = lock = tr = None
1590 try:
1590 try:
1591 wlock = self.wlock()
1591 wlock = self.wlock()
1592 lock = self.lock() # for recent changelog (see issue4368)
1592 lock = self.lock() # for recent changelog (see issue4368)
1593
1593
1594 wctx = self[None]
1594 wctx = self[None]
1595 merge = len(wctx.parents()) > 1
1595 merge = len(wctx.parents()) > 1
1596
1596
1597 if not force and merge and not match.always():
1597 if not force and merge and not match.always():
1598 raise error.Abort(_('cannot partially commit a merge '
1598 raise error.Abort(_('cannot partially commit a merge '
1599 '(do not specify files or patterns)'))
1599 '(do not specify files or patterns)'))
1600
1600
1601 status = self.status(match=match, clean=force)
1601 status = self.status(match=match, clean=force)
1602 if force:
1602 if force:
1603 status.modified.extend(status.clean) # mq may commit clean files
1603 status.modified.extend(status.clean) # mq may commit clean files
1604
1604
1605 # check subrepos
1605 # check subrepos
1606 subs = []
1606 subs = []
1607 commitsubs = set()
1607 commitsubs = set()
1608 newstate = wctx.substate.copy()
1608 newstate = wctx.substate.copy()
1609 # only manage subrepos and .hgsubstate if .hgsub is present
1609 # only manage subrepos and .hgsubstate if .hgsub is present
1610 if '.hgsub' in wctx:
1610 if '.hgsub' in wctx:
1611 # we'll decide whether to track this ourselves, thanks
1611 # we'll decide whether to track this ourselves, thanks
1612 for c in status.modified, status.added, status.removed:
1612 for c in status.modified, status.added, status.removed:
1613 if '.hgsubstate' in c:
1613 if '.hgsubstate' in c:
1614 c.remove('.hgsubstate')
1614 c.remove('.hgsubstate')
1615
1615
1616 # compare current state to last committed state
1616 # compare current state to last committed state
1617 # build new substate based on last committed state
1617 # build new substate based on last committed state
1618 oldstate = wctx.p1().substate
1618 oldstate = wctx.p1().substate
1619 for s in sorted(newstate.keys()):
1619 for s in sorted(newstate.keys()):
1620 if not match(s):
1620 if not match(s):
1621 # ignore working copy, use old state if present
1621 # ignore working copy, use old state if present
1622 if s in oldstate:
1622 if s in oldstate:
1623 newstate[s] = oldstate[s]
1623 newstate[s] = oldstate[s]
1624 continue
1624 continue
1625 if not force:
1625 if not force:
1626 raise error.Abort(
1626 raise error.Abort(
1627 _("commit with new subrepo %s excluded") % s)
1627 _("commit with new subrepo %s excluded") % s)
1628 dirtyreason = wctx.sub(s).dirtyreason(True)
1628 dirtyreason = wctx.sub(s).dirtyreason(True)
1629 if dirtyreason:
1629 if dirtyreason:
1630 if not self.ui.configbool('ui', 'commitsubrepos'):
1630 if not self.ui.configbool('ui', 'commitsubrepos'):
1631 raise error.Abort(dirtyreason,
1631 raise error.Abort(dirtyreason,
1632 hint=_("use --subrepos for recursive commit"))
1632 hint=_("use --subrepos for recursive commit"))
1633 subs.append(s)
1633 subs.append(s)
1634 commitsubs.add(s)
1634 commitsubs.add(s)
1635 else:
1635 else:
1636 bs = wctx.sub(s).basestate()
1636 bs = wctx.sub(s).basestate()
1637 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1637 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1638 if oldstate.get(s, (None, None, None))[1] != bs:
1638 if oldstate.get(s, (None, None, None))[1] != bs:
1639 subs.append(s)
1639 subs.append(s)
1640
1640
1641 # check for removed subrepos
1641 # check for removed subrepos
1642 for p in wctx.parents():
1642 for p in wctx.parents():
1643 r = [s for s in p.substate if s not in newstate]
1643 r = [s for s in p.substate if s not in newstate]
1644 subs += [s for s in r if match(s)]
1644 subs += [s for s in r if match(s)]
1645 if subs:
1645 if subs:
1646 if (not match('.hgsub') and
1646 if (not match('.hgsub') and
1647 '.hgsub' in (wctx.modified() + wctx.added())):
1647 '.hgsub' in (wctx.modified() + wctx.added())):
1648 raise error.Abort(
1648 raise error.Abort(
1649 _("can't commit subrepos without .hgsub"))
1649 _("can't commit subrepos without .hgsub"))
1650 status.modified.insert(0, '.hgsubstate')
1650 status.modified.insert(0, '.hgsubstate')
1651
1651
1652 elif '.hgsub' in status.removed:
1652 elif '.hgsub' in status.removed:
1653 # clean up .hgsubstate when .hgsub is removed
1653 # clean up .hgsubstate when .hgsub is removed
1654 if ('.hgsubstate' in wctx and
1654 if ('.hgsubstate' in wctx and
1655 '.hgsubstate' not in (status.modified + status.added +
1655 '.hgsubstate' not in (status.modified + status.added +
1656 status.removed)):
1656 status.removed)):
1657 status.removed.insert(0, '.hgsubstate')
1657 status.removed.insert(0, '.hgsubstate')
1658
1658
1659 # make sure all explicit patterns are matched
1659 # make sure all explicit patterns are matched
1660 if not force:
1660 if not force:
1661 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1661 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1662
1662
1663 cctx = context.workingcommitctx(self, status,
1663 cctx = context.workingcommitctx(self, status,
1664 text, user, date, extra)
1664 text, user, date, extra)
1665
1665
1666 # internal config: ui.allowemptycommit
1666 # internal config: ui.allowemptycommit
1667 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1667 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1668 or extra.get('close') or merge or cctx.files()
1668 or extra.get('close') or merge or cctx.files()
1669 or self.ui.configbool('ui', 'allowemptycommit'))
1669 or self.ui.configbool('ui', 'allowemptycommit'))
1670 if not allowemptycommit:
1670 if not allowemptycommit:
1671 return None
1671 return None
1672
1672
1673 if merge and cctx.deleted():
1673 if merge and cctx.deleted():
1674 raise error.Abort(_("cannot commit merge with missing files"))
1674 raise error.Abort(_("cannot commit merge with missing files"))
1675
1675
1676 ms = mergemod.mergestate.read(self)
1676 ms = mergemod.mergestate.read(self)
1677 mergeutil.checkunresolved(ms)
1677 mergeutil.checkunresolved(ms)
1678
1678
1679 if editor:
1679 if editor:
1680 cctx._text = editor(self, cctx, subs)
1680 cctx._text = editor(self, cctx, subs)
1681 edited = (text != cctx._text)
1681 edited = (text != cctx._text)
1682
1682
1683 # Save commit message in case this transaction gets rolled back
1683 # Save commit message in case this transaction gets rolled back
1684 # (e.g. by a pretxncommit hook). Leave the content alone on
1684 # (e.g. by a pretxncommit hook). Leave the content alone on
1685 # the assumption that the user will use the same editor again.
1685 # the assumption that the user will use the same editor again.
1686 msgfn = self.savecommitmessage(cctx._text)
1686 msgfn = self.savecommitmessage(cctx._text)
1687
1687
1688 # commit subs and write new state
1688 # commit subs and write new state
1689 if subs:
1689 if subs:
1690 for s in sorted(commitsubs):
1690 for s in sorted(commitsubs):
1691 sub = wctx.sub(s)
1691 sub = wctx.sub(s)
1692 self.ui.status(_('committing subrepository %s\n') %
1692 self.ui.status(_('committing subrepository %s\n') %
1693 subrepo.subrelpath(sub))
1693 subrepo.subrelpath(sub))
1694 sr = sub.commit(cctx._text, user, date)
1694 sr = sub.commit(cctx._text, user, date)
1695 newstate[s] = (newstate[s][0], sr)
1695 newstate[s] = (newstate[s][0], sr)
1696 subrepo.writestate(self, newstate)
1696 subrepo.writestate(self, newstate)
1697
1697
1698 p1, p2 = self.dirstate.parents()
1698 p1, p2 = self.dirstate.parents()
1699 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1699 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1700 try:
1700 try:
1701 self.hook("precommit", throw=True, parent1=hookp1,
1701 self.hook("precommit", throw=True, parent1=hookp1,
1702 parent2=hookp2)
1702 parent2=hookp2)
1703 tr = self.transaction('commit')
1703 tr = self.transaction('commit')
1704 ret = self.commitctx(cctx, True)
1704 ret = self.commitctx(cctx, True)
1705 except: # re-raises
1705 except: # re-raises
1706 if edited:
1706 if edited:
1707 self.ui.write(
1707 self.ui.write(
1708 _('note: commit message saved in %s\n') % msgfn)
1708 _('note: commit message saved in %s\n') % msgfn)
1709 raise
1709 raise
1710 # update bookmarks, dirstate and mergestate
1710 # update bookmarks, dirstate and mergestate
1711 bookmarks.update(self, [p1, p2], ret)
1711 bookmarks.update(self, [p1, p2], ret)
1712 cctx.markcommitted(ret)
1712 cctx.markcommitted(ret)
1713 ms.reset()
1713 ms.reset()
1714 tr.close()
1714 tr.close()
1715
1715
1716 finally:
1716 finally:
1717 lockmod.release(tr, lock, wlock)
1717 lockmod.release(tr, lock, wlock)
1718
1718
1719 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1719 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1720 # hack for command that use a temporary commit (eg: histedit)
1720 # hack for command that use a temporary commit (eg: histedit)
1721 # temporary commit got stripped before hook release
1721 # temporary commit got stripped before hook release
1722 if self.changelog.hasnode(ret):
1722 if self.changelog.hasnode(ret):
1723 self.hook("commit", node=node, parent1=parent1,
1723 self.hook("commit", node=node, parent1=parent1,
1724 parent2=parent2)
1724 parent2=parent2)
1725 self._afterlock(commithook)
1725 self._afterlock(commithook)
1726 return ret
1726 return ret
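
For reference, a successful call returns the new changeset's node, while None means there was nothing to commit. A minimal usage sketch (the user name and message are made up, not part of this changeset):

    # illustrative sketch: a programmatic commit, as a script might do it
    node = repo.commit(text='update docs', user='alice <alice@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')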

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
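
commitctx is also the entry point for commits synthesized entirely in memory. The following is a hedged sketch using context.memctx; the file name, contents, and metadata are made up:

    # illustrative sketch: a one-file commit built without a working directory
    from mercurial import context

    def filectxfn(repo, memctx, path):
        return context.memfilectx(repo, path, 'hello\n')

    mctx = context.memctx(repo, (repo['.'].node(), None), 'add greeting',
                          ['greeting.txt'], filectxfn,
                          user='alice', date='0 0')
    node = repo.commitctx(mctx)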

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
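
The returned object is a status tuple whose fields can be read by name; a small sketch (the flags here are chosen for illustration):

    # illustrative sketch: compare the working directory against '.'
    st = repo.status(ignored=True, clean=True, unknown=True)
    for f in st.modified:
        repo.ui.write('M %s\n' % f)
    # st also exposes .added, .removed, .deleted, .unknown, .ignored, .clean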

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
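
A hedged usage sketch for branchheads (the branch name is illustrative):

    # illustrative sketch: print open heads of 'default', newest first
    for node in repo.branchheads('default'):
        repo.ui.write('%s\n' % short(node))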

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # For each (top, bottom) pair, walk the first-parent chain from top
        # towards bottom and sample nodes at exponentially growing distances
        # (1, 2, 4, 8, ...). This backs the legacy 'between' wire-protocol
        # command, which narrows down a range in few round trips.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose functions are called with a
        pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
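
As a rough illustration, pushing a bookmark over the pushkey protocol reduces to a call like the following; the bookmark name and node are hypothetical:

    # illustrative sketch: old value '' means the bookmark is being created
    ok = repo.pushkey('bookmarks', 'stable', '', hex(newnode))
    if not ok:
        repo.ui.warn('updating bookmark stable failed!\n')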

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
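
The returned path is relative to the current working directory, which makes it suitable for user-facing messages; a sketch (the message text is made up):

    # illustrative sketch
    msgfn = repo.savecommitmessage('WIP: refactor foo\n')
    repo.ui.status('message saved in %s\n' % msgfn)  # e.g. .hg/last-message.txt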

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
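
The closure returned here is handed to the transaction machinery as a post-close callback; a hedged sketch of the intent (the vfs and file names are illustrative):

    # illustrative sketch: once the transaction closes, journal files are
    # renamed to their 'undo' counterparts so a later rollback can find them
    renames = [(repo.svfs, 'journal', 'undo'),
               (repo.vfs, 'journal.dirstate', 'undo.dirstate')]
    onclose = aftertrans(renames)
    onclose()  # normally invoked by the transaction, not called directly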

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
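
Since the docstring invites wrapping, here is a minimal extension sketch; the added requirement string is hypothetical:

    # illustrative sketch of an extension adding its own requirement
    from mercurial import extensions, localrepo

    def _wrapped(orig, repo):
        requirements = orig(repo)
        requirements.add('exp-myfeature')  # hypothetical requirement name
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements', _wrapped)
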
@@ -1,190 +1,191 b''
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os

from .i18n import _
from . import (
    byterange,
    changelog,
    error,
    localrepo,
    manifest,
    namespaces,
    scmutil,
    store,
    url,
    util,
    vfs as vfsmod,
)

urlerr = util.urlerr
urlreq = util.urlreq

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urlreq.request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            code = f.code
        except urlerr.httperror as inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urlerr.urlerror as inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def readlines(self):
        return self.read().splitlines(True)
    def __iter__(self):
        return iter(self.readlines())
    def close(self):
        pass

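To make the Range arithmetic concrete, a hedged sketch (the URL and opener are illustrative):

    # illustrative sketch: read bytes 100..131 of a remote revlog;
    # pos=100, bytes=32 yields 'Range: bytes=100-131' (end is inclusive)
    rdr = httprangereader('http://example.com/repo/.hg/store/00manifest.i',
                          urlopener)  # an opener with HTTPRangeHandler
    rdr.seek(100)
    chunk = rdr.read(32)
    assert len(chunk) <= 32  # a server may return less near end of file
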
def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    class statichttpvfs(vfsmod.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urlreq.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return os.path.join(self.base, path)
            else:
                return self.base

    return statichttpvfs

class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository):
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None

        try:
            requirements = scmutil.readrequires(self.vfs, self.supported)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.vfs("00changelog.i")
            fp.read(1)
            fp.close()
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # we do not care about empty old-style repositories here
            msg = _("'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        self.manifestlog = manifest.manifestlog(self.svfs, self)
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

    def _restrictcapabilities(self, caps):
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def lock(self, wait=True):
        raise error.Abort(_('cannot lock static-http repository'))

    def _writecaches(self):
        pass # statichttprepository is read-only

def instance(ui, path, create):
    if create:
        raise error.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
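
For context, instance() is reached when a repository URL uses the static-http scheme; a hedged sketch of opening one (the URL is illustrative):

    # illustrative sketch: open a read-only repo served by a plain web server
    from mercurial import hg, ui as uimod
    repo = hg.repository(uimod.ui.load(), 'static-http://example.com/repo')
    # path[7:] above strips the leading 'static-', leaving the plain
    # http:// URL that the range reader fetches from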