tag: remove a mutable default argument...
Pierre-Yves David
r26323:ed884807 default
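The change itself is one line: _tag() declared its extra parameter with a mutable default (extra={}). Python evaluates default values once, at function definition time, so a default dict is shared by every call that omits the argument, and any mutation leaks into later calls. Below is a minimal sketch of the pitfall and of the None-sentinel fix applied here (a standalone illustration, not Mercurial code; _tag() itself simply forwards extra to self.commit(), as visible near the bottom of the hunk):

# The mutable-default pitfall: the {} is built once, when 'def' runs,
# and every call that omits 'extra' mutates that same shared dict.
def tag_bad(name, extra={}):
    extra[name] = True
    return extra

first = tag_bad('v1')
second = tag_bad('v2')
assert first is second      # same dict object across calls
assert 'v1' in second       # v2's call still sees v1's entry

# The fix this changeset applies: default to None and allocate per call.
def tag_good(name, extra=None):
    if extra is None:
        extra = {}          # fresh dict on every call
    extra[name] = True
    return extra

assert 'v1' not in tag_good('v2')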
@@ -1,1963 +1,1963 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, wdirrev, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect, random
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                # experimental config: format.generaldelta
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        # $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        return bookmarks.readactive(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
-    def _tag(self, names, node, message, local, user, date, extra={},
+    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

616 def tag(self, names, node, message, local, user, date, editor=False):
616 def tag(self, names, node, message, local, user, date, editor=False):
617 '''tag a revision with one or more symbolic names.
617 '''tag a revision with one or more symbolic names.
618
618
619 names is a list of strings or, when adding a single tag, names may be a
619 names is a list of strings or, when adding a single tag, names may be a
620 string.
620 string.
621
621
622 if local is True, the tags are stored in a per-repository file.
622 if local is True, the tags are stored in a per-repository file.
623 otherwise, they are stored in the .hgtags file, and a new
623 otherwise, they are stored in the .hgtags file, and a new
624 changeset is committed with the change.
624 changeset is committed with the change.
625
625
626 keyword arguments:
626 keyword arguments:
627
627
628 local: whether to store tags in non-version-controlled file
628 local: whether to store tags in non-version-controlled file
629 (default False)
629 (default False)
630
630
631 message: commit message to use if committing
631 message: commit message to use if committing
632
632
633 user: name of user to use if committing
633 user: name of user to use if committing
634
634
635 date: date tuple to use if committing'''
635 date: date tuple to use if committing'''
636
636
637 if not local:
637 if not local:
638 m = matchmod.exact(self.root, '', ['.hgtags'])
638 m = matchmod.exact(self.root, '', ['.hgtags'])
639 if any(self.status(match=m, unknown=True, ignored=True)):
639 if any(self.status(match=m, unknown=True, ignored=True)):
640 raise util.Abort(_('working copy of .hgtags is changed'),
640 raise util.Abort(_('working copy of .hgtags is changed'),
641 hint=_('please commit .hgtags manually'))
641 hint=_('please commit .hgtags manually'))
642
642
643 self.tags() # instantiate the cache
643 self.tags() # instantiate the cache
644 self._tag(names, node, message, local, user, date, editor=editor)
644 self._tag(names, node, message, local, user, date, editor=editor)
645
645
646 @filteredpropertycache
646 @filteredpropertycache
647 def _tagscache(self):
647 def _tagscache(self):
648 '''Returns a tagscache object that contains various tags related
648 '''Returns a tagscache object that contains various tags related
649 caches.'''
649 caches.'''
650
650
651 # This simplifies its cache management by having one decorated
651 # This simplifies its cache management by having one decorated
652 # function (this one) and the rest simply fetch things from it.
652 # function (this one) and the rest simply fetch things from it.
653 class tagscache(object):
653 class tagscache(object):
654 def __init__(self):
654 def __init__(self):
655 # These two define the set of tags for this repository. tags
655 # These two define the set of tags for this repository. tags
656 # maps tag name to node; tagtypes maps tag name to 'global' or
656 # maps tag name to node; tagtypes maps tag name to 'global' or
657 # 'local'. (Global tags are defined by .hgtags across all
657 # 'local'. (Global tags are defined by .hgtags across all
658 # heads, and local tags are defined in .hg/localtags.)
658 # heads, and local tags are defined in .hg/localtags.)
659 # They constitute the in-memory cache of tags.
659 # They constitute the in-memory cache of tags.
660 self.tags = self.tagtypes = None
660 self.tags = self.tagtypes = None
661
661
662 self.nodetagscache = self.tagslist = None
662 self.nodetagscache = self.tagslist = None
663
663
664 cache = tagscache()
664 cache = tagscache()
665 cache.tags, cache.tagtypes = self._findtags()
665 cache.tags, cache.tagtypes = self._findtags()
666
666
667 return cache
667 return cache
668
668
669 def tags(self):
669 def tags(self):
670 '''return a mapping of tag to node'''
670 '''return a mapping of tag to node'''
671 t = {}
671 t = {}
672 if self.changelog.filteredrevs:
672 if self.changelog.filteredrevs:
673 tags, tt = self._findtags()
673 tags, tt = self._findtags()
674 else:
674 else:
675 tags = self._tagscache.tags
675 tags = self._tagscache.tags
676 for k, v in tags.iteritems():
676 for k, v in tags.iteritems():
677 try:
677 try:
678 # ignore tags to unknown nodes
678 # ignore tags to unknown nodes
679 self.changelog.rev(v)
679 self.changelog.rev(v)
680 t[k] = v
680 t[k] = v
681 except (error.LookupError, ValueError):
681 except (error.LookupError, ValueError):
682 pass
682 pass
683 return t
683 return t
684
684
685 def _findtags(self):
685 def _findtags(self):
686 '''Do the hard work of finding tags. Return a pair of dicts
686 '''Do the hard work of finding tags. Return a pair of dicts
687 (tags, tagtypes) where tags maps tag name to node, and tagtypes
687 (tags, tagtypes) where tags maps tag name to node, and tagtypes
688 maps tag name to a string like \'global\' or \'local\'.
688 maps tag name to a string like \'global\' or \'local\'.
689 Subclasses or extensions are free to add their own tags, but
689 Subclasses or extensions are free to add their own tags, but
690 should be aware that the returned dicts will be retained for the
690 should be aware that the returned dicts will be retained for the
691 duration of the localrepo object.'''
691 duration of the localrepo object.'''
692
692
693 # XXX what tagtype should subclasses/extensions use? Currently
693 # XXX what tagtype should subclasses/extensions use? Currently
694 # mq and bookmarks add tags, but do not set the tagtype at all.
694 # mq and bookmarks add tags, but do not set the tagtype at all.
695 # Should each extension invent its own tag type? Should there
695 # Should each extension invent its own tag type? Should there
696 # be one tagtype for all such "virtual" tags? Or is the status
696 # be one tagtype for all such "virtual" tags? Or is the status
697 # quo fine?
697 # quo fine?
698
698
699 alltags = {} # map tag name to (node, hist)
699 alltags = {} # map tag name to (node, hist)
700 tagtypes = {}
700 tagtypes = {}
701
701
702 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
702 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
703 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
703 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
704
704
705 # Build the return dicts. Have to re-encode tag names because
705 # Build the return dicts. Have to re-encode tag names because
706 # the tags module always uses UTF-8 (in order not to lose info
706 # the tags module always uses UTF-8 (in order not to lose info
707 # writing to the cache), but the rest of Mercurial wants them in
707 # writing to the cache), but the rest of Mercurial wants them in
708 # local encoding.
708 # local encoding.
709 tags = {}
709 tags = {}
710 for (name, (node, hist)) in alltags.iteritems():
710 for (name, (node, hist)) in alltags.iteritems():
711 if node != nullid:
711 if node != nullid:
712 tags[encoding.tolocal(name)] = node
712 tags[encoding.tolocal(name)] = node
713 tags['tip'] = self.changelog.tip()
713 tags['tip'] = self.changelog.tip()
714 tagtypes = dict([(encoding.tolocal(name), value)
714 tagtypes = dict([(encoding.tolocal(name), value)
715 for (name, value) in tagtypes.iteritems()])
715 for (name, value) in tagtypes.iteritems()])
716 return (tags, tagtypes)
716 return (tags, tagtypes)
717
717
718 def tagtype(self, tagname):
718 def tagtype(self, tagname):
719 '''
719 '''
720 return the type of the given tag. result can be:
720 return the type of the given tag. result can be:
721
721
722 'local' : a local tag
722 'local' : a local tag
723 'global' : a global tag
723 'global' : a global tag
724 None : tag does not exist
724 None : tag does not exist
725 '''
725 '''
726
726
727 return self._tagscache.tagtypes.get(tagname)
727 return self._tagscache.tagtypes.get(tagname)
728
728
729 def tagslist(self):
729 def tagslist(self):
730 '''return a list of tags ordered by revision'''
730 '''return a list of tags ordered by revision'''
731 if not self._tagscache.tagslist:
731 if not self._tagscache.tagslist:
732 l = []
732 l = []
733 for t, n in self.tags().iteritems():
733 for t, n in self.tags().iteritems():
734 l.append((self.changelog.rev(n), t, n))
734 l.append((self.changelog.rev(n), t, n))
735 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
735 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
736
736
737 return self._tagscache.tagslist
737 return self._tagscache.tagslist
738
738
739 def nodetags(self, node):
739 def nodetags(self, node):
740 '''return the tags associated with a node'''
740 '''return the tags associated with a node'''
741 if not self._tagscache.nodetagscache:
741 if not self._tagscache.nodetagscache:
742 nodetagscache = {}
742 nodetagscache = {}
743 for t, n in self._tagscache.tags.iteritems():
743 for t, n in self._tagscache.tags.iteritems():
744 nodetagscache.setdefault(n, []).append(t)
744 nodetagscache.setdefault(n, []).append(t)
745 for tags in nodetagscache.itervalues():
745 for tags in nodetagscache.itervalues():
746 tags.sort()
746 tags.sort()
747 self._tagscache.nodetagscache = nodetagscache
747 self._tagscache.nodetagscache = nodetagscache
748 return self._tagscache.nodetagscache.get(node, [])
748 return self._tagscache.nodetagscache.get(node, [])
749
749
750 def nodebookmarks(self, node):
750 def nodebookmarks(self, node):
751 marks = []
751 marks = []
752 for bookmark, n in self._bookmarks.iteritems():
752 for bookmark, n in self._bookmarks.iteritems():
753 if n == node:
753 if n == node:
754 marks.append(bookmark)
754 marks.append(bookmark)
755 return sorted(marks)
755 return sorted(marks)
756
756
757 def branchmap(self):
757 def branchmap(self):
758 '''returns a dictionary {branch: [branchheads]} with branchheads
758 '''returns a dictionary {branch: [branchheads]} with branchheads
759 ordered by increasing revision number'''
759 ordered by increasing revision number'''
760 branchmap.updatecache(self)
760 branchmap.updatecache(self)
761 return self._branchcaches[self.filtername]
761 return self._branchcaches[self.filtername]
762
762
763 @unfilteredmethod
763 @unfilteredmethod
764 def revbranchcache(self):
764 def revbranchcache(self):
765 if not self._revbranchcache:
765 if not self._revbranchcache:
766 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
766 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
767 return self._revbranchcache
767 return self._revbranchcache
768
768
769 def branchtip(self, branch, ignoremissing=False):
769 def branchtip(self, branch, ignoremissing=False):
770 '''return the tip node for a given branch
770 '''return the tip node for a given branch
771
771
772 If ignoremissing is True, then this method will not raise an error.
772 If ignoremissing is True, then this method will not raise an error.
773 This is helpful for callers that only expect None for a missing branch
773 This is helpful for callers that only expect None for a missing branch
774 (e.g. namespace).
774 (e.g. namespace).
775
775
776 '''
776 '''
777 try:
777 try:
778 return self.branchmap().branchtip(branch)
778 return self.branchmap().branchtip(branch)
779 except KeyError:
779 except KeyError:
780 if not ignoremissing:
780 if not ignoremissing:
781 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
781 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
782 else:
782 else:
783 pass
783 pass
784
784
785 def lookup(self, key):
785 def lookup(self, key):
786 return self[key].node()
786 return self[key].node()
787
787
788 def lookupbranch(self, key, remote=None):
788 def lookupbranch(self, key, remote=None):
789 repo = remote or self
789 repo = remote or self
790 if key in repo.branchmap():
790 if key in repo.branchmap():
791 return key
791 return key
792
792
793 repo = (remote and remote.local()) and remote or self
793 repo = (remote and remote.local()) and remote or self
794 return repo[key].branch()
794 return repo[key].branch()
795
795
796 def known(self, nodes):
796 def known(self, nodes):
797 nm = self.changelog.nodemap
797 nm = self.changelog.nodemap
798 pc = self._phasecache
798 pc = self._phasecache
799 result = []
799 result = []
800 for n in nodes:
800 for n in nodes:
801 r = nm.get(n)
801 r = nm.get(n)
802 resp = not (r is None or pc.phase(self, r) >= phases.secret)
802 resp = not (r is None or pc.phase(self, r) >= phases.secret)
803 result.append(resp)
803 result.append(resp)
804 return result
804 return result
805
805
806 def local(self):
806 def local(self):
807 return self
807 return self
808
808
809 def publishing(self):
809 def publishing(self):
810 # it's safe (and desirable) to trust the publish flag unconditionally
810 # it's safe (and desirable) to trust the publish flag unconditionally
811 # so that we don't finalize changes shared between users via ssh or nfs
811 # so that we don't finalize changes shared between users via ssh or nfs
812 return self.ui.configbool('phases', 'publish', True, untrusted=True)
812 return self.ui.configbool('phases', 'publish', True, untrusted=True)
813
813
814 def cancopy(self):
814 def cancopy(self):
815 # so statichttprepo's override of local() works
815 # so statichttprepo's override of local() works
816 if not self.local():
816 if not self.local():
817 return False
817 return False
818 if not self.publishing():
818 if not self.publishing():
819 return True
819 return True
820 # if publishing we can't copy if there is filtered content
820 # if publishing we can't copy if there is filtered content
821 return not self.filtered('visible').changelog.filteredrevs
821 return not self.filtered('visible').changelog.filteredrevs
822
822
823 def shared(self):
823 def shared(self):
824 '''the type of shared repository (None if not shared)'''
824 '''the type of shared repository (None if not shared)'''
825 if self.sharedpath != self.path:
825 if self.sharedpath != self.path:
826 return 'store'
826 return 'store'
827 return None
827 return None
828
828
829 def join(self, f, *insidef):
829 def join(self, f, *insidef):
830 return self.vfs.join(os.path.join(f, *insidef))
830 return self.vfs.join(os.path.join(f, *insidef))
831
831
832 def wjoin(self, f, *insidef):
832 def wjoin(self, f, *insidef):
833 return self.vfs.reljoin(self.root, f, *insidef)
833 return self.vfs.reljoin(self.root, f, *insidef)
834
834
835 def file(self, f):
835 def file(self, f):
836 if f[0] == '/':
836 if f[0] == '/':
837 f = f[1:]
837 f = f[1:]
838 return filelog.filelog(self.svfs, f)
838 return filelog.filelog(self.svfs, f)
839
839
840 def changectx(self, changeid):
840 def changectx(self, changeid):
841 return self[changeid]
841 return self[changeid]
842
842
843 def parents(self, changeid=None):
843 def parents(self, changeid=None):
844 '''get list of changectxs for parents of changeid'''
844 '''get list of changectxs for parents of changeid'''
845 return self[changeid].parents()
845 return self[changeid].parents()
846
846
847 def setparents(self, p1, p2=nullid):
847 def setparents(self, p1, p2=nullid):
848 self.dirstate.beginparentchange()
848 self.dirstate.beginparentchange()
849 copies = self.dirstate.setparents(p1, p2)
849 copies = self.dirstate.setparents(p1, p2)
850 pctx = self[p1]
850 pctx = self[p1]
851 if copies:
851 if copies:
852 # Adjust copy records, the dirstate cannot do it, it
852 # Adjust copy records, the dirstate cannot do it, it
853 # requires access to parents manifests. Preserve them
853 # requires access to parents manifests. Preserve them
854 # only for entries added to first parent.
854 # only for entries added to first parent.
855 for f in copies:
855 for f in copies:
856 if f not in pctx and copies[f] in pctx:
856 if f not in pctx and copies[f] in pctx:
857 self.dirstate.copy(copies[f], f)
857 self.dirstate.copy(copies[f], f)
858 if p2 == nullid:
858 if p2 == nullid:
859 for f, s in sorted(self.dirstate.copies().items()):
859 for f, s in sorted(self.dirstate.copies().items()):
860 if f not in pctx and s not in pctx:
860 if f not in pctx and s not in pctx:
861 self.dirstate.copy(None, f)
861 self.dirstate.copy(None, f)
862 self.dirstate.endparentchange()
862 self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
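
    # Illustrative sketch (editor's note, not part of the original file):
    # an extension could register a data filter and wire it to a pattern in
    # the [encode]/[decode] config sections. The names below ('myfilter:'
    # and the uppercase transform) are hypothetical.
    #
    #   def uppercase(s, cmd, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('myfilter:', uppercase)
    #
    # with a matching hgrc entry such as:
    #
    #   [encode]
    #   **.txt = myfilter:
    #
    # _loadfilter('encode') would then map '**.txt' to this function, and
    # wread() would run matching file data through it.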

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
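
    # Illustrative sketch (editor's note, not part of the original file):
    # the ``flags`` string uses 'l' for symlinks and 'x' for executables,
    # so callers would do something like:
    #
    #   repo.wwrite('script.sh', data, 'x')   # regular file, exec bit set
    #   repo.wwrite('alias', 'target', 'l')   # symlink pointing at 'target'
    #   repo.wwrite('plain.txt', data, '')    # plain regular file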

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write()

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
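
    # Illustrative sketch (editor's note, not part of the original file):
    # callers pair transaction() with close()/release(), holding the store
    # lock for the duration, roughly:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...              # write store data through tr
    #           tr.close()       # commit the transaction
    #       finally:
    #           tr.release()     # rolls back if close() was never reached
    #   finally:
    #       lock.release()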

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
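
    # Illustrative note (editor's addition, not part of the original file):
    # on a successful transaction, aftertrans() renames each journal file to
    # its undo counterpart (via undoname()), which is what later feeds
    # rollback(). Schematically:
    #
    #   .hg/store/journal            -> .hg/store/undo
    #   .hg/journal.dirstate         -> .hg/undo.dirstate
    #   .hg/journal.branch           -> .hg/undo.branch
    #   .hg/store/journal.phaseroots -> .hg/store/undo.phaseroots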

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
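
    # Illustrative sketch (editor's note, not part of the original file):
    # _afterlock defers work until every lock is dropped; if nothing is
    # currently locked, the callback runs immediately. E.g. (hypothetical
    # callback):
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)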

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
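
    # Illustrative sketch (editor's note, not part of the original file):
    # per the docstrings above, wlock must always be taken before lock:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...  # mutate working copy and store
    #   finally:
    #       release(lock, wlock)  # release in reverse acquisition order
    #
    # acquiring them in the opposite order risks deadlock against another
    # process doing it correctly.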

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
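
    # Illustrative note (editor's addition, not part of the original file):
    # when copy information survives, the filelog revision is stored with
    # metadata roughly like
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex-digit node>'}
    #
    # with fparent1 set to nullid, signalling "look up the copy data" to
    # later readers of the filelog.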

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                            '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                        '.hgsubstate' not in (status.modified + status.added +
                                              status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs, after the lock is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
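
    # Illustrative sketch (editor's note, not part of the original file):
    # a minimal caller commits everything in the working directory, e.g.:
    #
    #   node = repo.commit(text='fix the frobnicator',
    #                      user='Jane Doe <jane@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
    #
    # passing a ``match`` narrows the commit to selected files, and an
    # ``editor`` callback can supply the commit message interactively.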

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server
        # processes. Thanks to branchcache collaboration this is done from
        # the nearest filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1693
1693
1694 def walk(self, match, node=None):
1694 def walk(self, match, node=None):
1695 '''
1695 '''
1696 walk recursively through the directory tree or a given
1696 walk recursively through the directory tree or a given
1697 changeset, finding all files matched by the match
1697 changeset, finding all files matched by the match
1698 function
1698 function
1699 '''
1699 '''
1700 return self[node].walk(match)
1700 return self[node].walk(match)
1701
1701
1702 def status(self, node1='.', node2=None, match=None,
1702 def status(self, node1='.', node2=None, match=None,
1703 ignored=False, clean=False, unknown=False,
1703 ignored=False, clean=False, unknown=False,
1704 listsubrepos=False):
1704 listsubrepos=False):
1705 '''a convenience method that calls node1.status(node2)'''
1705 '''a convenience method that calls node1.status(node2)'''
1706 return self[node1].status(node2, match, ignored, clean, unknown,
1706 return self[node1].status(node2, match, ignored, clean, unknown,
1707 listsubrepos)
1707 listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
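    # The loop above walks first parents only: for each requested node it
    # stops at the first ancestor that is a merge (two real parents) or a
    # root, and records the 4-tuple (start, branchpoint, p1, p2). This is
    # the primitive behind the old 'branches' wire protocol command.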

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
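    # A worked example (illustrative, not from the original source): on a
    # linear chain of revisions 0..10, between([(node10, node0)]) samples
    # the first-parent chain at exponentially growing distances 1, 2, 4,
    # 8, ... below the top, so it returns the nodes of revisions 9, 8, 6
    # and 2. The answer thus stays O(log n) per pair, which is what keeps
    # the old 'between'-based discovery protocol cheap.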

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()
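    # Hypothetical extension usage (the names below are made up for
    # illustration): util.hooks collects callables and invokes each of them
    # with the same arguments, so an extension can veto a push by raising
    # from its callback.
    #
    #   def checkoutgoing(repo, remote, outgoing):
    #       if len(outgoing.missing) > 1000:
    #           raise util.Abort('refusing to push %d changesets'
    #                            % len(outgoing.missing))
    #
    #   def reposetup(ui, repo):
    #       repo.prepushoutgoinghooks.add('sizecheck', checkoutgoing)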

    def stream_in(self, remote, remotereqs):
        # Save the remote branchmap. We will use it later
        # to speed up branchcache creation.
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))

        self.applystreamclone(remotereqs, rbranchmap, fp)
        return len(self.heads()) + 1

    def applystreamclone(self, remotereqs, remotebranchmap, fp):
        """Apply stream clone data to this repository.

        "remotereqs" is a set of requirements to handle the incoming data.
        "remotebranchmap" is the result of a branchmap lookup on the remote. It
        can be None.
        "fp" is a file object containing the raw stream data, suitable for
        feeding into exchange.consumestreamclone.
        """
        lock = self.lock()
        try:
            exchange.consumestreamclone(self, fp)

            # new requirements = old non-format requirements +
            #                    new format-related remote requirements
            # requirements from the streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
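            # Worked example with illustrative values: if the local repo had
            # requirements {'revlogv1', 'store', 'fncache'} with
            # supportedformats containing 'revlogv1', and the remote sent
            # remotereqs = {'revlogv1', 'generaldelta'}, the result is
            # {'revlogv1', 'generaldelta'} | {'store', 'fncache'}: format
            # requirements are taken from the stream, everything else is
            # kept from the existing repo.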
            self._applyopenerreqs()
            self._writerequirements()

            if remotebranchmap:
                rbheads = []
                closed = []
                for bheads in remotebranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(remotebranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone.
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
        finally:
            lock.release()

    def clone(self, remote, heads=None, stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        # internal config: ui.quietbookmarkmove
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
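    # Usage sketch (illustrative values, not from the original source):
    # pushkey namespaces include 'bookmarks' and 'phases'. Creating a
    # bookmark through this interface would look roughly like
    #
    #   repo.pushkey('bookmarks', 'mybook', '', hex(newnode))
    #
    # where the empty 'old' value asserts the key does not exist yet and
    # the return value indicates success.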

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
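# Note on the shape of aftertrans: the closure deliberately captures only a
# plain list of (vfs, src, dest) tuples rather than the repo or transaction
# objects; holding either would create the reference cycle the comment above
# warns about and keep destructors from running promptly.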

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
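# For example (sketch): undoname('.hg/store/journal') -> '.hg/store/undo',
# and undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'.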

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True