localrepo: deprecate 'repo.opener' (API)...
Pierre-Yves David
r31148:3eaff87a default
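
This commit removes the `self.opener = self.vfs` aliasing in `localrepository.__init__` and reintroduces `opener` as a property that emits a deprecation warning before delegating to `repo.vfs`, mirroring the existing `wopener`/`wvfs` treatment. A minimal standalone sketch of the same pattern (the `vfs` stub and the `warnings`-based warning are illustrative stand-ins, not Mercurial's scmutil or ui APIs):

    import warnings

    class vfs(object):
        """Illustrative stand-in for scmutil.vfs (not the real API)."""
        def __init__(self, base):
            self.base = base

    class repo(object):
        def __init__(self, path):
            self.vfs = vfs(path)

        @property
        def opener(self):
            # The old spelling keeps working but warns, steering callers
            # toward 'repo.vfs' before the alias is removed (slated for 4.2).
            warnings.warn("use 'repo.vfs' instead of 'repo.opener'",
                          DeprecationWarning, stacklevel=2)
            return self.vfs

    r = repo('/tmp/demo')
    assert r.opener is r.vfs  # deprecated access still resolves, with a warning

Callers migrate by spelling `repo.opener` as `repo.vfs`; behavior is unchanged, since the deprecated property returns the very same vfs object.
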
@@ -1,2061 +1,2065 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import inspect
 import os
 import random
 import time
 import weakref

 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
     wdirrev,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     obsolete,
     pathutil,
     peer,
     phases,
     pushkey,
     repoview,
     revset,
     revsetlang,
     scmutil,
     store,
     subrepo,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 class repofilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """

     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())

 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                   'unbundle'))
 legacycaps = moderncaps.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats

     def close(self):
         self._repo.close()

     def _capabilities(self):
         return self._caps

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def url(self):
         return self._repo.url()

     def lookup(self, key):
         return self._repo.lookup(key)

     def branchmap(self):
         return self._repo.branchmap()

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)
         cb = util.chunkbuffer(chunks)

         if bundlecaps is not None and 'HG20' in bundlecaps:
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

     def unbundle(self, cg, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 cg = exchange.readbundle(self.ui, cg, None)
                 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'), str(exc))

     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return cg.apply(self._repo, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=legacycaps)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return changegroup.changegroup(self._repo, basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return changegroup.changegroupsubset(self._repo, bases, heads, source)

 class localrepository(object):

     supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                             'manifestv2'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                              'relshared', 'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
     filtername = None

     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()

     def __init__(self, baseui, path, create=False):
         self.requirements = set()
         # vfs to access the working copy
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         # vfs to access the content of the repository
         self.vfs = None
         # vfs to access the store part of the repository
         self.svfs = None
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                 realfs=False)
         self.vfs = scmutil.vfs(self.path)
-        self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             self._loadextensions()
         except IOError:
             pass

         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
         color.setup(self.ui)

         # Add compression engines.
         for name in util.compengines:
             engine = util.compengines[name]
             if engine.revlogheader():
                 self.supported.add('exp-compression-%s' % name)

         if not self.vfs.isdir():
             if create:
                 self.requirements = newreporequirements(self)

                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)

                 if 'store' in self.requirements:
                     self.vfs.mkdir("store")

                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 self.requirements = scmutil.readrequires(
                     self.vfs, self.supported)
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise

         self.sharedpath = self.path
         try:
             sharedpath = self.vfs.read("sharedpath").rstrip('\n')
             if 'relshared' in self.requirements:
                 sharedpath = self.vfs.join(sharedpath)
             vfs = scmutil.vfs(sharedpath, realpath=True)

             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(
             self.requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyopenerreqs()
         if create:
             self._writerequirements()

         self._dirstatevalidatewarned = False

         self._branchcaches = {}
         self._revbranchcache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}

         # generic mapping between names and nodes
         self.names = namespaces.namespaces()

     @property
     def wopener(self):
         self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
         return self.wvfs

+    @property
+    def opener(self):
+        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
+        return self.vfs
+
     def close(self):
         self._writecaches()

     def _loadextensions(self):
         extensions.loadall(self.ui)

     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()

     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise', True):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps

     def _applyopenerreqs(self):
         self.svfs.options = dict((r, 1) for r in self.requirements
                                  if r in self.openerreqs)
         # experimental config: format.chunkcachesize
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         # experimental config: format.maxchainlen
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
         # experimental config: format.manifestcachesize
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
         # experimental config: format.aggressivemergedeltas
         aggressivemergedeltas = self.ui.configbool('format',
             'aggressivemergedeltas', False)
         self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

         for r in self.requirements:
             if r.startswith('exp-compression-'):
                 self.svfs.options['compengine'] = r[len('exp-compression-'):]

     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False

     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle

     def unfiltered(self):
         """Return unfiltered version of the repository

         Intended to be overwritten by filtered repo."""
         return self

     def filtered(self, name):
         """Return a filtered version of a repository"""
         # build a new class with the mixin and the current class
         # (possibly subclass of the repo)
         class proxycls(repoview.repoview, self.unfiltered().__class__):
             pass
         return proxycls(self, name)

     @repofilecache('bookmarks', 'bookmarks.current')
     def _bookmarks(self):
         return bookmarks.bmstore(self)

     @property
     def _activebookmark(self):
         return self._bookmarks.active

     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads

     # _phaserevs and _phasesets depend on changelog. what we need is to
     # call _phasecache.invalidate() if '00changelog.i' was changed, but it
     # can't be easily expressed in filecache mechanism.
     @storecache('phaseroots', '00changelog.i')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)

     @storecache('obsstore')
     def obsstore(self):
         # read default format for new obsstore.
         # developer config: format.obsstore-version
         defaultformat = self.ui.configint('format', 'obsstore-version', None)
         # rely on obsstore class default when possible.
         kwargs = {}
         if defaultformat is not None:
             kwargs['defaultformat'] = defaultformat
         readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
         store = obsolete.obsstore(self.svfs, readonly=readonly,
                                   **kwargs)
         if store and readonly:
             self.ui.warn(
                 _('obsolete feature not enabled but %i markers found!\n')
                 % len(list(store)))
         return store

     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.svfs)
         if txnutil.mayhavepending(self.root):
             c.readpending('00changelog.i.a')
         return c

     def _constructmanifest(self):
         # This is a temporary function while we migrate from manifest to
         # manifestlog. It allows bundlerepo and unionrepo to intercept the
         # manifest creation.
         return manifest.manifestrevlog(self.svfs)

     @storecache('00manifest.i')
     def manifestlog(self):
         return manifest.manifestlog(self.svfs, self)

     @repofilecache('dirstate')
     def dirstate(self):
         return dirstate.dirstate(self.vfs, self.ui, self.root,
                                  self._dirstatevalidate)

     def _dirstatevalidate(self, node):
         try:
             self.changelog.rev(node)
             return node
         except error.LookupError:
             if not self._dirstatevalidatewarned:
                 self._dirstatevalidatewarned = True
                 self.ui.warn(_("warning: ignoring unknown"
                                " working parent %s!\n") % short(node))
             return nullid

     def __getitem__(self, changeid):
         if changeid is None or changeid == wdirrev:
             return context.workingctx(self)
         if isinstance(changeid, slice):
             return [context.changectx(self, i)
                     for i in xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         return context.changectx(self, changeid)

     def __contains__(self, changeid):
         try:
             self[changeid]
             return True
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         return iter(self.changelog)

     def revs(self, expr, *args):
         '''Find revisions matching a revset.

         The revset is specified as a string ``expr`` that may contain
         %-formatting to escape certain types. See ``revsetlang.formatspec``.

         Revset aliases from the configuration are not expanded. To expand
         user aliases, consider calling ``scmutil.revrange()`` or
         ``repo.anyrevs([expr], user=True)``.

         Returns a revset.abstractsmartset, which is a list-like interface
         that contains integer revisions.
         '''
         expr = revsetlang.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self)

     def set(self, expr, *args):
         '''Find revisions matching a revset and emit changectx instances.

         This is a convenience wrapper around ``revs()`` that iterates the
         result and is a generator of changectx instances.

         Revset aliases from the configuration are not expanded. To expand
         user aliases, consider calling ``scmutil.revrange()``.
         '''
         for r in self.revs(expr, *args):
             yield self[r]

     def anyrevs(self, specs, user=False):
         '''Find revisions matching one of the given revsets.

         Revset aliases from the configuration are not expanded by default. To
         expand user aliases, specify ``user=True``.
         '''
         if user:
             m = revset.matchany(self.ui, specs, repo=self)
         else:
             m = revset.matchany(None, specs)
         return m(self)

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         """Call a hook, passing this repo instance.

         This a convenience method to aid invoking hooks. Extensions likely
         won't call this unless they have registered a custom hook or are
         replacing code that is expected to call a hook.
         """
         return hook.hook(self.ui, self, name, throw, **args)

     @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra=None,
              editor=False):
         if isinstance(names, str):
             names = (names,)

         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 if munge:
                     m = munge(name)
                 else:
                     m = name

                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.vfs('localtags', 'r+')
             except IOError:
                 fp = self.vfs('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError as e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         fp.close()

         self.invalidatecaches()

         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])

         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m,
                               editor=editor)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date, editor=False):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         if not local:
             m = matchmod.exact(self.root, '', ['.hgtags'])
             if any(self.status(match=m, unknown=True, ignored=True)):
                 raise error.Abort(_('working copy of .hgtags is changed'),
                                   hint=_('please commit .hgtags manually'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date, editor=editor)

     @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''

         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None

                 self.nodetagscache = self.tagslist = None

         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()

         return cache

     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         if self.changelog.filteredrevs:
             tags, tt = self._findtags()
         else:
             tags = self._tagscache.tags
         for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t

     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''

         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?

         alltags = {} # map tag name to (node, hist)
         tagtypes = {}

         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         return self._tagscache.tagtypes.get(tagname)

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 l.append((self.changelog.rev(n), t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

         return self._tagscache.tagslist

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])

     def nodebookmarks(self, node):
         """return the list of bookmarks pointing to the specified node"""
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)

     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]} with branchheads
         ordered by increasing revision number'''
         branchmap.updatecache(self)
         return self._branchcaches[self.filtername]

     @unfilteredmethod
     def revbranchcache(self):
         if not self._revbranchcache:
             self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
         return self._revbranchcache

     def branchtip(self, branch, ignoremissing=False):
         '''return the tip node for a given branch

         If ignoremissing is True, then this method will not raise an error.
         This is helpful for callers that only expect None for a missing branch
         (e.g. namespace).

         '''
         try:
             return self.branchmap().branchtip(branch)
         except KeyError:
             if not ignoremissing:
                 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
             else:
                 pass

     def lookup(self, key):
         return self[key].node()

     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key

         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()

     def known(self, nodes):
         cl = self.changelog
         nm = cl.nodemap
         filtered = cl.filteredrevs
         result = []
         for n in nodes:
             r = nm.get(n)
             resp = not (r is None or r in filtered)
             result.append(resp)
         return result

     def local(self):
         return self

     def publishing(self):
         # it's safe (and desirable) to trust the publish flag unconditionally
         # so that we don't finalize changes shared between users via ssh or nfs
         return self.ui.configbool('phases', 'publish', True, untrusted=True)

     def cancopy(self):
         # so statichttprepo's override of local() works
         if not self.local():
             return False
         if not self.publishing():
             return True
         # if publishing we can't copy if there is filtered content
         return not self.filtered('visible').changelog.filteredrevs

     def shared(self):
         '''the type of shared repository (None if not shared)'''
         if self.sharedpath != self.path:
             return 'store'
         return None

     def join(self, f, *insidef):
         return self.vfs.join(os.path.join(f, *insidef))

     def wjoin(self, f, *insidef):
         return self.vfs.reljoin(self.root, f, *insidef)

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.svfs, f)

     def changectx(self, changeid):
         return self[changeid]

     def setparents(self, p1, p2=nullid):
         self.dirstate.beginparentchange()
         copies = self.dirstate.setparents(p1, p2)
         pctx = self[p1]
         if copies:
             # Adjust copy records, the dirstate cannot do it, it
             # requires access to parents manifests. Preserve them
             # only for entries added to first parent.
             for f in copies:
                 if f not in pctx and copies[f] in pctx:
                     self.dirstate.copy(copies[f], f)
         if p2 == nullid:
             for f, s in sorted(self.dirstate.copies().items()):
                 if f not in pctx and s not in pctx:
                     self.dirstate.copy(None, f)
         self.dirstate.endparentchange()

     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
967 def pathto(self, f, cwd=None):
971 def pathto(self, f, cwd=None):
968 return self.dirstate.pathto(f, cwd)
972 return self.dirstate.pathto(f, cwd)
969
973
970 def wfile(self, f, mode='r'):
974 def wfile(self, f, mode='r'):
971 return self.wvfs(f, mode)
975 return self.wvfs(f, mode)
972
976
973 def _link(self, f):
977 def _link(self, f):
974 return self.wvfs.islink(f)
978 return self.wvfs.islink(f)
975
979
976 def _loadfilter(self, filter):
980 def _loadfilter(self, filter):
977 if filter not in self.filterpats:
981 if filter not in self.filterpats:
978 l = []
982 l = []
979 for pat, cmd in self.ui.configitems(filter):
983 for pat, cmd in self.ui.configitems(filter):
980 if cmd == '!':
984 if cmd == '!':
981 continue
985 continue
982 mf = matchmod.match(self.root, '', [pat])
986 mf = matchmod.match(self.root, '', [pat])
983 fn = None
987 fn = None
984 params = cmd
988 params = cmd
985 for name, filterfn in self._datafilters.iteritems():
989 for name, filterfn in self._datafilters.iteritems():
986 if cmd.startswith(name):
990 if cmd.startswith(name):
987 fn = filterfn
991 fn = filterfn
988 params = cmd[len(name):].lstrip()
992 params = cmd[len(name):].lstrip()
989 break
993 break
990 if not fn:
994 if not fn:
991 fn = lambda s, c, **kwargs: util.filter(s, c)
995 fn = lambda s, c, **kwargs: util.filter(s, c)
992 # Wrap old filters not supporting keyword arguments
996 # Wrap old filters not supporting keyword arguments
993 if not inspect.getargspec(fn)[2]:
997 if not inspect.getargspec(fn)[2]:
994 oldfn = fn
998 oldfn = fn
995 fn = lambda s, c, **kwargs: oldfn(s, c)
999 fn = lambda s, c, **kwargs: oldfn(s, c)
996 l.append((mf, fn, params))
1000 l.append((mf, fn, params))
997 self.filterpats[filter] = l
1001 self.filterpats[filter] = l
998 return self.filterpats[filter]
1002 return self.filterpats[filter]
999
1003
1000 def _filter(self, filterpats, filename, data):
1004 def _filter(self, filterpats, filename, data):
1001 for mf, fn, cmd in filterpats:
1005 for mf, fn, cmd in filterpats:
1002 if mf(filename):
1006 if mf(filename):
1003 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1007 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1004 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1008 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1005 break
1009 break
1006
1010
1007 return data
1011 return data
1008
1012
1009 @unfilteredpropertycache
1013 @unfilteredpropertycache
1010 def _encodefilterpats(self):
1014 def _encodefilterpats(self):
1011 return self._loadfilter('encode')
1015 return self._loadfilter('encode')
1012
1016
1013 @unfilteredpropertycache
1017 @unfilteredpropertycache
1014 def _decodefilterpats(self):
1018 def _decodefilterpats(self):
1015 return self._loadfilter('decode')
1019 return self._loadfilter('decode')
1016
1020
1017 def adddatafilter(self, name, filter):
1021 def adddatafilter(self, name, filter):
1018 self._datafilters[name] = filter
1022 self._datafilters[name] = filter
1019
1023
1020 def wread(self, filename):
1024 def wread(self, filename):
1021 if self._link(filename):
1025 if self._link(filename):
1022 data = self.wvfs.readlink(filename)
1026 data = self.wvfs.readlink(filename)
1023 else:
1027 else:
1024 data = self.wvfs.read(filename)
1028 data = self.wvfs.read(filename)
1025 return self._filter(self._encodefilterpats, filename, data)
1029 return self._filter(self._encodefilterpats, filename, data)
1026
1030
1027 def wwrite(self, filename, data, flags, backgroundclose=False):
1031 def wwrite(self, filename, data, flags, backgroundclose=False):
1028 """write ``data`` into ``filename`` in the working directory
1032 """write ``data`` into ``filename`` in the working directory
1029
1033
1030 This returns length of written (maybe decoded) data.
1034 This returns length of written (maybe decoded) data.
1031 """
1035 """
1032 data = self._filter(self._decodefilterpats, filename, data)
1036 data = self._filter(self._decodefilterpats, filename, data)
1033 if 'l' in flags:
1037 if 'l' in flags:
1034 self.wvfs.symlink(data, filename)
1038 self.wvfs.symlink(data, filename)
1035 else:
1039 else:
1036 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1040 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1037 if 'x' in flags:
1041 if 'x' in flags:
1038 self.wvfs.setflags(filename, False, True)
1042 self.wvfs.setflags(filename, False, True)
1039 return len(data)
1043 return len(data)
1040
1044
1041 def wwritedata(self, filename, data):
1045 def wwritedata(self, filename, data):
1042 return self._filter(self._decodefilterpats, filename, data)
1046 return self._filter(self._decodefilterpats, filename, data)
1043
1047
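    # Illustrative sketch (not part of the original file): an extension could
    # register a working-directory filter through adddatafilter(); the name
    # 'upper:' and the function below are hypothetical.
    #
    #     def upperfilter(s, params, **kwargs):
    #         # transform file data on its way through wread()/wwrite()
    #         return s.upper()
    #
    #     repo.adddatafilter('upper:', upperfilter)
    #
    # With an [encode] entry such as '**.txt = upper:', _loadfilter('encode')
    # would then route matching files through upperfilter.
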
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

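    # Illustrative sketch (not part of the original file): the calling
    # pattern this method expects pairs a store lock with a transaction;
    # 'my-operation' is a placeholder description.
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         try:
    #             pass  # write to the store through tr
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         lock.release()
    #
    # Without the lock, the devel checks above raise ProgrammingError.
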
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

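    # Illustrative sketch (not part of the original file): deferring work
    # until every lock is dropped, as commit() below does for the 'commit'
    # hook; the callback name is hypothetical.
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #
    #     repo._afterlock(notify)  # runs immediately if no lock is held
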
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

1440 """Returns the lock if it's held, or None if it's not."""
1444 """Returns the lock if it's held, or None if it's not."""
1441 if lockref is None:
1445 if lockref is None:
1442 return None
1446 return None
1443 l = lockref()
1447 l = lockref()
1444 if l is None or not l.held:
1448 if l is None or not l.held:
1445 return None
1449 return None
1446 return l
1450 return l
1447
1451
1448 def currentwlock(self):
1452 def currentwlock(self):
1449 """Returns the wlock if it's held, or None if it's not."""
1453 """Returns the wlock if it's held, or None if it's not."""
1450 return self._currentlock(self._wlockref)
1454 return self._currentlock(self._wlockref)
1451
1455
1452 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1456 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1453 """
1457 """
1454 commit an individual file as part of a larger transaction
1458 commit an individual file as part of a larger transaction
1455 """
1459 """
1456
1460
1457 fname = fctx.path()
1461 fname = fctx.path()
1458 fparent1 = manifest1.get(fname, nullid)
1462 fparent1 = manifest1.get(fname, nullid)
1459 fparent2 = manifest2.get(fname, nullid)
1463 fparent2 = manifest2.get(fname, nullid)
1460 if isinstance(fctx, context.filectx):
1464 if isinstance(fctx, context.filectx):
1461 node = fctx.filenode()
1465 node = fctx.filenode()
1462 if node in [fparent1, fparent2]:
1466 if node in [fparent1, fparent2]:
1463 self.ui.debug('reusing %s filelog entry\n' % fname)
1467 self.ui.debug('reusing %s filelog entry\n' % fname)
1464 if manifest1.flags(fname) != fctx.flags():
1468 if manifest1.flags(fname) != fctx.flags():
1465 changelist.append(fname)
1469 changelist.append(fname)
1466 return node
1470 return node
1467
1471
1468 flog = self.file(fname)
1472 flog = self.file(fname)
1469 meta = {}
1473 meta = {}
1470 copy = fctx.renamed()
1474 copy = fctx.renamed()
1471 if copy and copy[0] != fname:
1475 if copy and copy[0] != fname:
1472 # Mark the new revision of this file as a copy of another
1476 # Mark the new revision of this file as a copy of another
1473 # file. This copy data will effectively act as a parent
1477 # file. This copy data will effectively act as a parent
1474 # of this new revision. If this is a merge, the first
1478 # of this new revision. If this is a merge, the first
1475 # parent will be the nullid (meaning "look up the copy data")
1479 # parent will be the nullid (meaning "look up the copy data")
1476 # and the second one will be the other parent. For example:
1480 # and the second one will be the other parent. For example:
1477 #
1481 #
1478 # 0 --- 1 --- 3 rev1 changes file foo
1482 # 0 --- 1 --- 3 rev1 changes file foo
1479 # \ / rev2 renames foo to bar and changes it
1483 # \ / rev2 renames foo to bar and changes it
1480 # \- 2 -/ rev3 should have bar with all changes and
1484 # \- 2 -/ rev3 should have bar with all changes and
1481 # should record that bar descends from
1485 # should record that bar descends from
1482 # bar in rev2 and foo in rev1
1486 # bar in rev2 and foo in rev1
1483 #
1487 #
1484 # this allows this merge to succeed:
1488 # this allows this merge to succeed:
1485 #
1489 #
1486 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1490 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1487 # \ / merging rev3 and rev4 should use bar@rev2
1491 # \ / merging rev3 and rev4 should use bar@rev2
1488 # \- 2 --- 4 as the merge base
1492 # \- 2 --- 4 as the merge base
1489 #
1493 #
1490
1494
1491 cfname = copy[0]
1495 cfname = copy[0]
1492 crev = manifest1.get(cfname)
1496 crev = manifest1.get(cfname)
1493 newfparent = fparent2
1497 newfparent = fparent2
1494
1498
1495 if manifest2: # branch merge
1499 if manifest2: # branch merge
1496 if fparent2 == nullid or crev is None: # copied on remote side
1500 if fparent2 == nullid or crev is None: # copied on remote side
1497 if cfname in manifest2:
1501 if cfname in manifest2:
1498 crev = manifest2[cfname]
1502 crev = manifest2[cfname]
1499 newfparent = fparent1
1503 newfparent = fparent1
1500
1504
1501 # Here, we used to search backwards through history to try to find
1505 # Here, we used to search backwards through history to try to find
1502 # where the file copy came from if the source of a copy was not in
1506 # where the file copy came from if the source of a copy was not in
1503 # the parent directory. However, this doesn't actually make sense to
1507 # the parent directory. However, this doesn't actually make sense to
1504 # do (what does a copy from something not in your working copy even
1508 # do (what does a copy from something not in your working copy even
1505 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1509 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1506 # the user that copy information was dropped, so if they didn't
1510 # the user that copy information was dropped, so if they didn't
1507 # expect this outcome it can be fixed, but this is the correct
1511 # expect this outcome it can be fixed, but this is the correct
1508 # behavior in this circumstance.
1512 # behavior in this circumstance.
1509
1513
1510 if crev:
1514 if crev:
1511 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1515 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1512 meta["copy"] = cfname
1516 meta["copy"] = cfname
1513 meta["copyrev"] = hex(crev)
1517 meta["copyrev"] = hex(crev)
1514 fparent1, fparent2 = nullid, newfparent
1518 fparent1, fparent2 = nullid, newfparent
1515 else:
1519 else:
1516 self.ui.warn(_("warning: can't find ancestor for '%s' "
1520 self.ui.warn(_("warning: can't find ancestor for '%s' "
1517 "copied from '%s'!\n") % (fname, cfname))
1521 "copied from '%s'!\n") % (fname, cfname))
1518
1522
1519 elif fparent1 == nullid:
1523 elif fparent1 == nullid:
1520 fparent1, fparent2 = fparent2, nullid
1524 fparent1, fparent2 = fparent2, nullid
1521 elif fparent2 != nullid:
1525 elif fparent2 != nullid:
1522 # is one parent an ancestor of the other?
1526 # is one parent an ancestor of the other?
1523 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1527 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1524 if fparent1 in fparentancestors:
1528 if fparent1 in fparentancestors:
1525 fparent1, fparent2 = fparent2, nullid
1529 fparent1, fparent2 = fparent2, nullid
1526 elif fparent2 in fparentancestors:
1530 elif fparent2 in fparentancestors:
1527 fparent2 = nullid
1531 fparent2 = nullid
1528
1532
1529 # is the file changed?
1533 # is the file changed?
1530 text = fctx.data()
1534 text = fctx.data()
1531 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1535 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1532 changelist.append(fname)
1536 changelist.append(fname)
1533 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1537 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1534 # are just the flags changed during merge?
1538 # are just the flags changed during merge?
1535 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1539 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1536 changelist.append(fname)
1540 changelist.append(fname)
1537
1541
1538 return fparent1
1542 return fparent1
1539
1543
1540 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1544 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1541 """check for commit arguments that aren't committable"""
1545 """check for commit arguments that aren't committable"""
1542 if match.isexact() or match.prefix():
1546 if match.isexact() or match.prefix():
1543 matched = set(status.modified + status.added + status.removed)
1547 matched = set(status.modified + status.added + status.removed)
1544
1548
1545 for f in match.files():
1549 for f in match.files():
1546 f = self.dirstate.normalize(f)
1550 f = self.dirstate.normalize(f)
1547 if f == '.' or f in matched or f in wctx.substate:
1551 if f == '.' or f in matched or f in wctx.substate:
1548 continue
1552 continue
1549 if f in status.deleted:
1553 if f in status.deleted:
1550 fail(f, _('file not found!'))
1554 fail(f, _('file not found!'))
1551 if f in vdirs: # visited directory
1555 if f in vdirs: # visited directory
1552 d = f + '/'
1556 d = f + '/'
1553 for mf in matched:
1557 for mf in matched:
1554 if mf.startswith(d):
1558 if mf.startswith(d):
1555 break
1559 break
1556 else:
1560 else:
1557 fail(f, _("no match under directory!"))
1561 fail(f, _("no match under directory!"))
1558 elif f not in self.dirstate:
1562 elif f not in self.dirstate:
1559 fail(f, _("file not tracked!"))
1563 fail(f, _("file not tracked!"))
1560
1564
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

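    # Illustrative sketch (not part of the original file): a minimal
    # programmatic commit through this API; the message and user values
    # are placeholders. commit() takes its own locks, as seen above.
    #
    #     node = repo.commit(text='example commit', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
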
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

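The target phase for the new changeset comes from
subrepo.newcommitphase(self.ui, ctx), which honors the phases.new-commit
configuration. For example, a repository can be configured so that new
commits start out secret (illustrative snippet):

    [phases]
    new-commit = secret
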
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

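between() walks first parents from each `top` toward `bottom` and records the
nodes whose distance from `top` is a power of two (1, 2, 4, 8, ...), producing
an exponentially spaced sample of the path between them. A standalone sketch
of that sampling loop, with integers standing in for changeset nodes (all
names here are illustrative):

    def sample(top, bottom, parent):
        n, picked, i, f = top, [], 0, 1
        while n != bottom:
            p = parent(n)
            if i == f:          # distance from top hit the next power of two
                picked.append(n)
                f = f * 2
            n = p
            i += 1
        return picked

    # linear history 10 -> 9 -> ... -> 0, with parent(n) = n - 1:
    print(sample(10, 0, lambda n: n - 1))   # [9, 8, 6, 2]
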
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (exposing repo, remote and outgoing) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

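A prepushkey hook that fails makes this method return False and report
"pushkey-abort" to the client. An illustrative hgrc hook that rejects all
pushkey writes to the bookmarks namespace (the hook suffix and the shell
command are examples, not part of this change):

    [hooks]
    prepushkey.nobookmarks = sh -c 'test "$HG_NAMESPACE" != "bookmarks"'
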
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

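aftertrans is a module-level function returning a closure over plain
(vfs, src, dest) tuples, so the transaction's reference to the callback holds
no reference back to the repository object, and undoname simply maps a
journal file to its undo counterpart. For instance (illustrative paths):

    print(undoname('.hg/store/journal'))             # .hg/store/undo
    print(undoname('.hg/store/journal.phaseroots'))  # .hg/store/undo.phaseroots
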
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
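
These requirements are computed once, at repository creation time, from the
user's configuration. An illustrative hgrc that would make newly created
repositories use the zstd engine and tree manifests (assuming zstd support is
compiled in; the values shown are examples):

    [experimental]
    format.compression = zstd
    treemanifest = True
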
@@ -1,190 +1,189 b''
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os

from .i18n import _
from . import (
    byterange,
    changelog,
    error,
    localrepo,
    manifest,
    namespaces,
    scmutil,
    store,
    url,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urlreq.request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            code = f.code
        except urlerr.httperror as inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urlerr.urlerror as inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def readlines(self):
        return self.read().splitlines(True)
    def __iter__(self):
        return iter(self.readlines())
    def close(self):
        pass

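httprangereader turns file reads into HTTP byte-range requests: a server that
honors Range answers 206 with just the requested slice, while one that
ignores it answers 200 with the whole entity, which read() then slices
locally. A standalone Python 3 sketch of the same request shape (the URL and
byte counts are placeholders):

    import urllib.request

    req = urllib.request.Request('https://example.com/repo/.hg/requires')
    req.add_header('Range', 'bytes=0-63')  # ask for the first 64 bytes only
    with urllib.request.urlopen(req) as f:
        data = f.read()
        if f.status == 200:    # Range ignored: full entity returned
            data = data[:64]   # slice locally, as read() does
        # f.status == 206 means the server returned only the requested slice
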
def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    class statichttpvfs(scmutil.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urlreq.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return os.path.join(self.base, path)
            else:
                return self.base

    return statichttpvfs

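Note that build_opener returns the class itself, not an instance: every
statichttpvfs later constructed from it shares the single configured
urlopener through the closure. A generic, runnable sketch of this
factory-closure pattern (all names are illustrative):

    def make_vfs_class(opener):
        class vfs(object):
            def __init__(self, base):
                self.base = base
            def __call__(self, path):
                # `opener` is captured from the enclosing call, so every
                # instance produced by this class shares the same opener
                return opener(self.base + '/' + path)
        return vfs

    fakeopen = lambda u: 'GET %s' % u          # stand-in for a real opener
    v = make_vfs_class(fakeopen)('http://example.com/.hg')
    print(v('requires'))                       # GET http://example.com/.hg/requires
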
class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository):
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
-       self.opener = self.vfs
        self._phasedefaults = []

        self.names = namespaces.namespaces()

        try:
            requirements = scmutil.readrequires(self.vfs, self.supported)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.vfs("00changelog.i")
            fp.read(1)
            fp.close()
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # we do not care about empty old-style repositories here
            msg = _("'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        self.manifestlog = manifest.manifestlog(self.svfs, self)
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

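The removed assignment marked with "-" above is the only change in this file:
the repository keeps self.vfs as the canonical attribute instead of
publishing a second name for it. As a hypothetical sketch, not Mercurial's
actual implementation, an old attribute name can be kept as a warning alias
for a new one like this:

    import warnings

    class repo(object):
        def __init__(self, vfs):
            self.vfs = vfs

        @property
        def opener(self):
            # old name kept as a deprecation alias for the new attribute
            warnings.warn('repo.opener is deprecated, use repo.vfs',
                          DeprecationWarning, stacklevel=2)
            return self.vfs
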
    def _restrictcapabilities(self, caps):
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def lock(self, wait=True):
        raise error.Abort(_('cannot lock static-http repository'))

    def _writecaches(self):
        pass # statichttprepository is read-only

def instance(ui, path, create):
    if create:
        raise error.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
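
instance() receives the full 'static-http://...' URL and strips the
seven-character 'static-' prefix before handing the remainder to the
repository class, e.g. (illustrative URL):

    # 'static-http://example.com/repo'[7:] == 'http://example.com/repo'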