##// END OF EJS Templates
localrepo: provide workingctx by integer revision...
Yuya Nishihara -
r25764:22049b56 default
parent child Browse files
Show More
@@ -1,1942 +1,1942
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# NOTE: wdirrev is needed by localrepository.__getitem__ to map the
# working-directory pseudo revision to a workingctx.
from node import hex, nullid, wdirrev, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect, random
import branchmap, pathutil
import namespaces

# short aliases used by the cache decorators below
propertycache = util.propertycache
filecache = scmutil.filecache
24
24
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # Descriptor protocol: always operate on the unfiltered repo so that a
    # filtered view and its underlying repo share one cache entry.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve cache-key paths inside .hg/store rather than .hg/
        return obj.sjoin(fname)
40
40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo itself: cache normally
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a filtered view: delegate to the unfiltered repo's
        # attribute so the value is computed and cached only there
        return getattr(unfi, self.name)
49
49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store on the instance being accessed (possibly a repoview), so each
        # filtered view keeps its own cached value
        object.__setattr__(obj, self.name, value)
55
55
56
56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # the cached value lives in the unfiltered repo's instance dict
    return name in vars(repo.unfiltered())
60
60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # redirect the call to the unfiltered repo, forwarding all arguments
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
66
66
# wire-protocol capability sets advertised by local peers
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
# legacy peers additionally support the pre-bundle2 changegroupsubset call
legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # the peer exposes the 'served' filtered view, not the raw repo
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
class localrepository(object):
    """A local, on-disk Mercurial repository (.hg directory + working copy)."""

    # repository format requirements this class knows how to read/write
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that are forwarded to the store vfs as revlog options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the repoview filter applied; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return ['revlogv1']
207 return ['revlogv1']
208
208
209 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
210 self.requirements = set()
210 self.requirements = set()
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 self.wopener = self.wvfs
212 self.wopener = self.wvfs
213 self.root = self.wvfs.base
213 self.root = self.wvfs.base
214 self.path = self.wvfs.join(".hg")
214 self.path = self.wvfs.join(".hg")
215 self.origroot = path
215 self.origroot = path
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 self.vfs = scmutil.vfs(self.path)
217 self.vfs = scmutil.vfs(self.path)
218 self.opener = self.vfs
218 self.opener = self.vfs
219 self.baseui = baseui
219 self.baseui = baseui
220 self.ui = baseui.copy()
220 self.ui = baseui.copy()
221 self.ui.copy = baseui.copy # prevent copying repo configuration
221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 # A list of callback to shape the phase if no data were found.
222 # A list of callback to shape the phase if no data were found.
223 # Callback are in the form: func(repo, roots) --> processed root.
223 # Callback are in the form: func(repo, roots) --> processed root.
224 # This list it to be filled by extension during repo setup
224 # This list it to be filled by extension during repo setup
225 self._phasedefaults = []
225 self._phasedefaults = []
226 try:
226 try:
227 self.ui.readconfig(self.join("hgrc"), self.root)
227 self.ui.readconfig(self.join("hgrc"), self.root)
228 extensions.loadall(self.ui)
228 extensions.loadall(self.ui)
229 except IOError:
229 except IOError:
230 pass
230 pass
231
231
232 if self.featuresetupfuncs:
232 if self.featuresetupfuncs:
233 self.supported = set(self._basesupported) # use private copy
233 self.supported = set(self._basesupported) # use private copy
234 extmods = set(m.__name__ for n, m
234 extmods = set(m.__name__ for n, m
235 in extensions.extensions(self.ui))
235 in extensions.extensions(self.ui))
236 for setupfunc in self.featuresetupfuncs:
236 for setupfunc in self.featuresetupfuncs:
237 if setupfunc.__module__ in extmods:
237 if setupfunc.__module__ in extmods:
238 setupfunc(self.ui, self.supported)
238 setupfunc(self.ui, self.supported)
239 else:
239 else:
240 self.supported = self._basesupported
240 self.supported = self._basesupported
241
241
242 if not self.vfs.isdir():
242 if not self.vfs.isdir():
243 if create:
243 if create:
244 if not self.wvfs.exists():
244 if not self.wvfs.exists():
245 self.wvfs.makedirs()
245 self.wvfs.makedirs()
246 self.vfs.makedir(notindexed=True)
246 self.vfs.makedir(notindexed=True)
247 self.requirements.update(self._baserequirements(create))
247 self.requirements.update(self._baserequirements(create))
248 if self.ui.configbool('format', 'usestore', True):
248 if self.ui.configbool('format', 'usestore', True):
249 self.vfs.mkdir("store")
249 self.vfs.mkdir("store")
250 self.requirements.add("store")
250 self.requirements.add("store")
251 if self.ui.configbool('format', 'usefncache', True):
251 if self.ui.configbool('format', 'usefncache', True):
252 self.requirements.add("fncache")
252 self.requirements.add("fncache")
253 if self.ui.configbool('format', 'dotencode', True):
253 if self.ui.configbool('format', 'dotencode', True):
254 self.requirements.add('dotencode')
254 self.requirements.add('dotencode')
255 # create an invalid changelog
255 # create an invalid changelog
256 self.vfs.append(
256 self.vfs.append(
257 "00changelog.i",
257 "00changelog.i",
258 '\0\0\0\2' # represents revlogv2
258 '\0\0\0\2' # represents revlogv2
259 ' dummy changelog to prevent using the old repo layout'
259 ' dummy changelog to prevent using the old repo layout'
260 )
260 )
261 if self.ui.configbool('format', 'generaldelta', False):
261 if self.ui.configbool('format', 'generaldelta', False):
262 self.requirements.add("generaldelta")
262 self.requirements.add("generaldelta")
263 if self.ui.configbool('experimental', 'treemanifest', False):
263 if self.ui.configbool('experimental', 'treemanifest', False):
264 self.requirements.add("treemanifest")
264 self.requirements.add("treemanifest")
265 if self.ui.configbool('experimental', 'manifestv2', False):
265 if self.ui.configbool('experimental', 'manifestv2', False):
266 self.requirements.add("manifestv2")
266 self.requirements.add("manifestv2")
267 else:
267 else:
268 raise error.RepoError(_("repository %s not found") % path)
268 raise error.RepoError(_("repository %s not found") % path)
269 elif create:
269 elif create:
270 raise error.RepoError(_("repository %s already exists") % path)
270 raise error.RepoError(_("repository %s already exists") % path)
271 else:
271 else:
272 try:
272 try:
273 self.requirements = scmutil.readrequires(
273 self.requirements = scmutil.readrequires(
274 self.vfs, self.supported)
274 self.vfs, self.supported)
275 except IOError as inst:
275 except IOError as inst:
276 if inst.errno != errno.ENOENT:
276 if inst.errno != errno.ENOENT:
277 raise
277 raise
278
278
279 self.sharedpath = self.path
279 self.sharedpath = self.path
280 try:
280 try:
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 realpath=True)
282 realpath=True)
283 s = vfs.base
283 s = vfs.base
284 if not vfs.exists():
284 if not vfs.exists():
285 raise error.RepoError(
285 raise error.RepoError(
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 self.sharedpath = s
287 self.sharedpath = s
288 except IOError as inst:
288 except IOError as inst:
289 if inst.errno != errno.ENOENT:
289 if inst.errno != errno.ENOENT:
290 raise
290 raise
291
291
292 self.store = store.store(
292 self.store = store.store(
293 self.requirements, self.sharedpath, scmutil.vfs)
293 self.requirements, self.sharedpath, scmutil.vfs)
294 self.spath = self.store.path
294 self.spath = self.store.path
295 self.svfs = self.store.vfs
295 self.svfs = self.store.vfs
296 self.sjoin = self.store.join
296 self.sjoin = self.store.join
297 self.vfs.createmode = self.store.createmode
297 self.vfs.createmode = self.store.createmode
298 self._applyopenerreqs()
298 self._applyopenerreqs()
299 if create:
299 if create:
300 self._writerequirements()
300 self._writerequirements()
301
301
302
302
303 self._branchcaches = {}
303 self._branchcaches = {}
304 self._revbranchcache = None
304 self._revbranchcache = None
305 self.filterpats = {}
305 self.filterpats = {}
306 self._datafilters = {}
306 self._datafilters = {}
307 self._transref = self._lockref = self._wlockref = None
307 self._transref = self._lockref = self._wlockref = None
308
308
309 # A cache for various files under .hg/ that tracks file changes,
309 # A cache for various files under .hg/ that tracks file changes,
310 # (used by the filecache decorator)
310 # (used by the filecache decorator)
311 #
311 #
312 # Maps a property name to its util.filecacheentry
312 # Maps a property name to its util.filecacheentry
313 self._filecache = {}
313 self._filecache = {}
314
314
315 # hold sets of revision to be filtered
315 # hold sets of revision to be filtered
316 # should be cleared when something might have changed the filter value:
316 # should be cleared when something might have changed the filter value:
317 # - new changesets,
317 # - new changesets,
318 # - phase change,
318 # - phase change,
319 # - new obsolescence marker,
319 # - new obsolescence marker,
320 # - working directory parent change,
320 # - working directory parent change,
321 # - bookmark changes
321 # - bookmark changes
322 self.filteredrevcache = {}
322 self.filteredrevcache = {}
323
323
324 # generic mapping between names and nodes
324 # generic mapping between names and nodes
325 self.names = namespaces.namespaces()
325 self.names = namespaces.namespaces()
326
326
327 def close(self):
327 def close(self):
328 self._writecaches()
328 self._writecaches()
329
329
330 def _writecaches(self):
330 def _writecaches(self):
331 if self._revbranchcache:
331 if self._revbranchcache:
332 self._revbranchcache.write()
332 self._revbranchcache.write()
333
333
334 def _restrictcapabilities(self, caps):
334 def _restrictcapabilities(self, caps):
335 if self.ui.configbool('experimental', 'bundle2-advertise', True):
335 if self.ui.configbool('experimental', 'bundle2-advertise', True):
336 caps = set(caps)
336 caps = set(caps)
337 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
337 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
338 caps.add('bundle2=' + urllib.quote(capsblob))
338 caps.add('bundle2=' + urllib.quote(capsblob))
339 return caps
339 return caps
340
340
341 def _applyopenerreqs(self):
341 def _applyopenerreqs(self):
342 self.svfs.options = dict((r, 1) for r in self.requirements
342 self.svfs.options = dict((r, 1) for r in self.requirements
343 if r in self.openerreqs)
343 if r in self.openerreqs)
344 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
344 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
345 if chunkcachesize is not None:
345 if chunkcachesize is not None:
346 self.svfs.options['chunkcachesize'] = chunkcachesize
346 self.svfs.options['chunkcachesize'] = chunkcachesize
347 maxchainlen = self.ui.configint('format', 'maxchainlen')
347 maxchainlen = self.ui.configint('format', 'maxchainlen')
348 if maxchainlen is not None:
348 if maxchainlen is not None:
349 self.svfs.options['maxchainlen'] = maxchainlen
349 self.svfs.options['maxchainlen'] = maxchainlen
350 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
350 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
351 if manifestcachesize is not None:
351 if manifestcachesize is not None:
352 self.svfs.options['manifestcachesize'] = manifestcachesize
352 self.svfs.options['manifestcachesize'] = manifestcachesize
353
353
354 def _writerequirements(self):
354 def _writerequirements(self):
355 scmutil.writerequires(self.vfs, self.requirements)
355 scmutil.writerequires(self.vfs, self.requirements)
356
356
357 def _checknested(self, path):
357 def _checknested(self, path):
358 """Determine if path is a legal nested repository."""
358 """Determine if path is a legal nested repository."""
359 if not path.startswith(self.root):
359 if not path.startswith(self.root):
360 return False
360 return False
361 subpath = path[len(self.root) + 1:]
361 subpath = path[len(self.root) + 1:]
362 normsubpath = util.pconvert(subpath)
362 normsubpath = util.pconvert(subpath)
363
363
364 # XXX: Checking against the current working copy is wrong in
364 # XXX: Checking against the current working copy is wrong in
365 # the sense that it can reject things like
365 # the sense that it can reject things like
366 #
366 #
367 # $ hg cat -r 10 sub/x.txt
367 # $ hg cat -r 10 sub/x.txt
368 #
368 #
369 # if sub/ is no longer a subrepository in the working copy
369 # if sub/ is no longer a subrepository in the working copy
370 # parent revision.
370 # parent revision.
371 #
371 #
372 # However, it can of course also allow things that would have
372 # However, it can of course also allow things that would have
373 # been rejected before, such as the above cat command if sub/
373 # been rejected before, such as the above cat command if sub/
374 # is a subrepository now, but was a normal directory before.
374 # is a subrepository now, but was a normal directory before.
375 # The old path auditor would have rejected by mistake since it
375 # The old path auditor would have rejected by mistake since it
376 # panics when it sees sub/.hg/.
376 # panics when it sees sub/.hg/.
377 #
377 #
378 # All in all, checking against the working copy seems sensible
378 # All in all, checking against the working copy seems sensible
379 # since we want to prevent access to nested repositories on
379 # since we want to prevent access to nested repositories on
380 # the filesystem *now*.
380 # the filesystem *now*.
381 ctx = self[None]
381 ctx = self[None]
382 parts = util.splitpath(subpath)
382 parts = util.splitpath(subpath)
383 while parts:
383 while parts:
384 prefix = '/'.join(parts)
384 prefix = '/'.join(parts)
385 if prefix in ctx.substate:
385 if prefix in ctx.substate:
386 if prefix == normsubpath:
386 if prefix == normsubpath:
387 return True
387 return True
388 else:
388 else:
389 sub = ctx.sub(prefix)
389 sub = ctx.sub(prefix)
390 return sub.checknested(subpath[len(prefix) + 1:])
390 return sub.checknested(subpath[len(prefix) + 1:])
391 else:
391 else:
392 parts.pop()
392 parts.pop()
393 return False
393 return False
394
394
395 def peer(self):
395 def peer(self):
396 return localpeer(self) # not cached to avoid reference cycle
396 return localpeer(self) # not cached to avoid reference cycle
397
397
398 def unfiltered(self):
398 def unfiltered(self):
399 """Return unfiltered version of the repository
399 """Return unfiltered version of the repository
400
400
401 Intended to be overwritten by filtered repo."""
401 Intended to be overwritten by filtered repo."""
402 return self
402 return self
403
403
404 def filtered(self, name):
404 def filtered(self, name):
405 """Return a filtered version of a repository"""
405 """Return a filtered version of a repository"""
406 # build a new class with the mixin and the current class
406 # build a new class with the mixin and the current class
407 # (possibly subclass of the repo)
407 # (possibly subclass of the repo)
408 class proxycls(repoview.repoview, self.unfiltered().__class__):
408 class proxycls(repoview.repoview, self.unfiltered().__class__):
409 pass
409 pass
410 return proxycls(self, name)
410 return proxycls(self, name)
411
411
412 @repofilecache('bookmarks')
412 @repofilecache('bookmarks')
413 def _bookmarks(self):
413 def _bookmarks(self):
414 return bookmarks.bmstore(self)
414 return bookmarks.bmstore(self)
415
415
416 @repofilecache('bookmarks.current')
416 @repofilecache('bookmarks.current')
417 def _activebookmark(self):
417 def _activebookmark(self):
418 return bookmarks.readactive(self)
418 return bookmarks.readactive(self)
419
419
420 def bookmarkheads(self, bookmark):
420 def bookmarkheads(self, bookmark):
421 name = bookmark.split('@', 1)[0]
421 name = bookmark.split('@', 1)[0]
422 heads = []
422 heads = []
423 for mark, n in self._bookmarks.iteritems():
423 for mark, n in self._bookmarks.iteritems():
424 if mark.split('@', 1)[0] == name:
424 if mark.split('@', 1)[0] == name:
425 heads.append(n)
425 heads.append(n)
426 return heads
426 return heads
427
427
428 @storecache('phaseroots')
428 @storecache('phaseroots')
429 def _phasecache(self):
429 def _phasecache(self):
430 return phases.phasecache(self, self._phasedefaults)
430 return phases.phasecache(self, self._phasedefaults)
431
431
432 @storecache('obsstore')
432 @storecache('obsstore')
433 def obsstore(self):
433 def obsstore(self):
434 # read default format for new obsstore.
434 # read default format for new obsstore.
435 defaultformat = self.ui.configint('format', 'obsstore-version', None)
435 defaultformat = self.ui.configint('format', 'obsstore-version', None)
436 # rely on obsstore class default when possible.
436 # rely on obsstore class default when possible.
437 kwargs = {}
437 kwargs = {}
438 if defaultformat is not None:
438 if defaultformat is not None:
439 kwargs['defaultformat'] = defaultformat
439 kwargs['defaultformat'] = defaultformat
440 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
440 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
441 store = obsolete.obsstore(self.svfs, readonly=readonly,
441 store = obsolete.obsstore(self.svfs, readonly=readonly,
442 **kwargs)
442 **kwargs)
443 if store and readonly:
443 if store and readonly:
444 self.ui.warn(
444 self.ui.warn(
445 _('obsolete feature not enabled but %i markers found!\n')
445 _('obsolete feature not enabled but %i markers found!\n')
446 % len(list(store)))
446 % len(list(store)))
447 return store
447 return store
448
448
449 @storecache('00changelog.i')
449 @storecache('00changelog.i')
450 def changelog(self):
450 def changelog(self):
451 c = changelog.changelog(self.svfs)
451 c = changelog.changelog(self.svfs)
452 if 'HG_PENDING' in os.environ:
452 if 'HG_PENDING' in os.environ:
453 p = os.environ['HG_PENDING']
453 p = os.environ['HG_PENDING']
454 if p.startswith(self.root):
454 if p.startswith(self.root):
455 c.readpending('00changelog.i.a')
455 c.readpending('00changelog.i.a')
456 return c
456 return c
457
457
458 @storecache('00manifest.i')
458 @storecache('00manifest.i')
459 def manifest(self):
459 def manifest(self):
460 return manifest.manifest(self.svfs)
460 return manifest.manifest(self.svfs)
461
461
462 def dirlog(self, dir):
462 def dirlog(self, dir):
463 return self.manifest.dirlog(dir)
463 return self.manifest.dirlog(dir)
464
464
465 @repofilecache('dirstate')
465 @repofilecache('dirstate')
466 def dirstate(self):
466 def dirstate(self):
467 warned = [0]
467 warned = [0]
468 def validate(node):
468 def validate(node):
469 try:
469 try:
470 self.changelog.rev(node)
470 self.changelog.rev(node)
471 return node
471 return node
472 except error.LookupError:
472 except error.LookupError:
473 if not warned[0]:
473 if not warned[0]:
474 warned[0] = True
474 warned[0] = True
475 self.ui.warn(_("warning: ignoring unknown"
475 self.ui.warn(_("warning: ignoring unknown"
476 " working parent %s!\n") % short(node))
476 " working parent %s!\n") % short(node))
477 return nullid
477 return nullid
478
478
479 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
479 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480
480
    def __getitem__(self, changeid):
        """Return the context for ``changeid`` (rev, node, None, or slice)."""
        # None and the special working-directory revision both resolve to
        # the in-memory working context rather than a stored changeset.
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # slices yield a list of contexts, skipping filtered (hidden) revs
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
489
489
    def __contains__(self, changeid):
        """True if ``changeid`` resolves to a changeset in this repo."""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False
496
496
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True
499
499
    def __len__(self):
        """Number of revisions in the changelog."""
        return len(self.changelog)
502
502
    def __iter__(self):
        """Iterate over revision numbers, delegating to the changelog."""
        return iter(self.changelog)
505
505
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset

        ``args`` are interpolated into ``expr`` via revset.formatspec.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
511
511
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        # generator: contexts are created lazily, one revision at a time
        for r in self.revs(expr, *args):
            yield self[r]
519
519
    def url(self):
        """Return this repository's URL ('file:' plus the root path)."""
        return 'file:' + self.root
522
522
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        # ui and repo are supplied automatically; extra args pass through
        return hook.hook(self.ui, self, name, throw, **args)
531
531
532 @unfilteredmethod
532 @unfilteredmethod
533 def _tag(self, names, node, message, local, user, date, extra={},
533 def _tag(self, names, node, message, local, user, date, extra={},
534 editor=False):
534 editor=False):
535 if isinstance(names, str):
535 if isinstance(names, str):
536 names = (names,)
536 names = (names,)
537
537
538 branches = self.branchmap()
538 branches = self.branchmap()
539 for name in names:
539 for name in names:
540 self.hook('pretag', throw=True, node=hex(node), tag=name,
540 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 local=local)
541 local=local)
542 if name in branches:
542 if name in branches:
543 self.ui.warn(_("warning: tag %s conflicts with existing"
543 self.ui.warn(_("warning: tag %s conflicts with existing"
544 " branch name\n") % name)
544 " branch name\n") % name)
545
545
546 def writetags(fp, names, munge, prevtags):
546 def writetags(fp, names, munge, prevtags):
547 fp.seek(0, 2)
547 fp.seek(0, 2)
548 if prevtags and prevtags[-1] != '\n':
548 if prevtags and prevtags[-1] != '\n':
549 fp.write('\n')
549 fp.write('\n')
550 for name in names:
550 for name in names:
551 if munge:
551 if munge:
552 m = munge(name)
552 m = munge(name)
553 else:
553 else:
554 m = name
554 m = name
555
555
556 if (self._tagscache.tagtypes and
556 if (self._tagscache.tagtypes and
557 name in self._tagscache.tagtypes):
557 name in self._tagscache.tagtypes):
558 old = self.tags().get(name, nullid)
558 old = self.tags().get(name, nullid)
559 fp.write('%s %s\n' % (hex(old), m))
559 fp.write('%s %s\n' % (hex(old), m))
560 fp.write('%s %s\n' % (hex(node), m))
560 fp.write('%s %s\n' % (hex(node), m))
561 fp.close()
561 fp.close()
562
562
563 prevtags = ''
563 prevtags = ''
564 if local:
564 if local:
565 try:
565 try:
566 fp = self.vfs('localtags', 'r+')
566 fp = self.vfs('localtags', 'r+')
567 except IOError:
567 except IOError:
568 fp = self.vfs('localtags', 'a')
568 fp = self.vfs('localtags', 'a')
569 else:
569 else:
570 prevtags = fp.read()
570 prevtags = fp.read()
571
571
572 # local tags are stored in the current charset
572 # local tags are stored in the current charset
573 writetags(fp, names, None, prevtags)
573 writetags(fp, names, None, prevtags)
574 for name in names:
574 for name in names:
575 self.hook('tag', node=hex(node), tag=name, local=local)
575 self.hook('tag', node=hex(node), tag=name, local=local)
576 return
576 return
577
577
578 try:
578 try:
579 fp = self.wfile('.hgtags', 'rb+')
579 fp = self.wfile('.hgtags', 'rb+')
580 except IOError as e:
580 except IOError as e:
581 if e.errno != errno.ENOENT:
581 if e.errno != errno.ENOENT:
582 raise
582 raise
583 fp = self.wfile('.hgtags', 'ab')
583 fp = self.wfile('.hgtags', 'ab')
584 else:
584 else:
585 prevtags = fp.read()
585 prevtags = fp.read()
586
586
587 # committed tags are stored in UTF-8
587 # committed tags are stored in UTF-8
588 writetags(fp, names, encoding.fromlocal, prevtags)
588 writetags(fp, names, encoding.fromlocal, prevtags)
589
589
590 fp.close()
590 fp.close()
591
591
592 self.invalidatecaches()
592 self.invalidatecaches()
593
593
594 if '.hgtags' not in self.dirstate:
594 if '.hgtags' not in self.dirstate:
595 self[None].add(['.hgtags'])
595 self[None].add(['.hgtags'])
596
596
597 m = matchmod.exact(self.root, '', ['.hgtags'])
597 m = matchmod.exact(self.root, '', ['.hgtags'])
598 tagnode = self.commit(message, user, date, extra=extra, match=m,
598 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 editor=editor)
599 editor=editor)
600
600
601 for name in names:
601 for name in names:
602 self.hook('tag', node=hex(node), tag=name, local=local)
602 self.hook('tag', node=hex(node), tag=name, local=local)
603
603
604 return tagnode
604 return tagnode
605
605
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # a global tag commits .hgtags, so refuse to clobber any
            # uncommitted modification of that file
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
635
635
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
658
658
659 def tags(self):
659 def tags(self):
660 '''return a mapping of tag to node'''
660 '''return a mapping of tag to node'''
661 t = {}
661 t = {}
662 if self.changelog.filteredrevs:
662 if self.changelog.filteredrevs:
663 tags, tt = self._findtags()
663 tags, tt = self._findtags()
664 else:
664 else:
665 tags = self._tagscache.tags
665 tags = self._tagscache.tags
666 for k, v in tags.iteritems():
666 for k, v in tags.iteritems():
667 try:
667 try:
668 # ignore tags to unknown nodes
668 # ignore tags to unknown nodes
669 self.changelog.rev(v)
669 self.changelog.rev(v)
670 t[k] = v
670 t[k] = v
671 except (error.LookupError, ValueError):
671 except (error.LookupError, ValueError):
672 pass
672 pass
673 return t
673 return t
674
674
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # a nullid node means the tag was deleted; drop it
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
707
707
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
718
718
719 def tagslist(self):
719 def tagslist(self):
720 '''return a list of tags ordered by revision'''
720 '''return a list of tags ordered by revision'''
721 if not self._tagscache.tagslist:
721 if not self._tagscache.tagslist:
722 l = []
722 l = []
723 for t, n in self.tags().iteritems():
723 for t, n in self.tags().iteritems():
724 l.append((self.changelog.rev(n), t, n))
724 l.append((self.changelog.rev(n), t, n))
725 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
725 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726
726
727 return self._tagscache.tagslist
727 return self._tagscache.tagslist
728
728
729 def nodetags(self, node):
729 def nodetags(self, node):
730 '''return the tags associated with a node'''
730 '''return the tags associated with a node'''
731 if not self._tagscache.nodetagscache:
731 if not self._tagscache.nodetagscache:
732 nodetagscache = {}
732 nodetagscache = {}
733 for t, n in self._tagscache.tags.iteritems():
733 for t, n in self._tagscache.tags.iteritems():
734 nodetagscache.setdefault(n, []).append(t)
734 nodetagscache.setdefault(n, []).append(t)
735 for tags in nodetagscache.itervalues():
735 for tags in nodetagscache.itervalues():
736 tags.sort()
736 tags.sort()
737 self._tagscache.nodetagscache = nodetagscache
737 self._tagscache.nodetagscache = nodetagscache
738 return self._tagscache.nodetagscache.get(node, [])
738 return self._tagscache.nodetagscache.get(node, [])
739
739
740 def nodebookmarks(self, node):
740 def nodebookmarks(self, node):
741 marks = []
741 marks = []
742 for bookmark, n in self._bookmarks.iteritems():
742 for bookmark, n in self._bookmarks.iteritems():
743 if n == node:
743 if n == node:
744 marks.append(bookmark)
744 marks.append(bookmark)
745 return sorted(marks)
745 return sorted(marks)
746
746
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter cache before handing it out
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
752
752
    @unfilteredmethod
    def revbranchcache(self):
        """Return the rev -> branch cache, creating it on first use."""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
758
758
759 def branchtip(self, branch, ignoremissing=False):
759 def branchtip(self, branch, ignoremissing=False):
760 '''return the tip node for a given branch
760 '''return the tip node for a given branch
761
761
762 If ignoremissing is True, then this method will not raise an error.
762 If ignoremissing is True, then this method will not raise an error.
763 This is helpful for callers that only expect None for a missing branch
763 This is helpful for callers that only expect None for a missing branch
764 (e.g. namespace).
764 (e.g. namespace).
765
765
766 '''
766 '''
767 try:
767 try:
768 return self.branchmap().branchtip(branch)
768 return self.branchmap().branchtip(branch)
769 except KeyError:
769 except KeyError:
770 if not ignoremissing:
770 if not ignoremissing:
771 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
771 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 else:
772 else:
773 pass
773 pass
774
774
    def lookup(self, key):
        """Resolve ``key`` to a changeset node (via __getitem__)."""
        return self[key].node()
777
777
778 def lookupbranch(self, key, remote=None):
778 def lookupbranch(self, key, remote=None):
779 repo = remote or self
779 repo = remote or self
780 if key in repo.branchmap():
780 if key in repo.branchmap():
781 return key
781 return key
782
782
783 repo = (remote and remote.local()) and remote or self
783 repo = (remote and remote.local()) and remote or self
784 return repo[key].branch()
784 return repo[key].branch()
785
785
786 def known(self, nodes):
786 def known(self, nodes):
787 nm = self.changelog.nodemap
787 nm = self.changelog.nodemap
788 pc = self._phasecache
788 pc = self._phasecache
789 result = []
789 result = []
790 for n in nodes:
790 for n in nodes:
791 r = nm.get(n)
791 r = nm.get(n)
792 resp = not (r is None or pc.phase(self, r) >= phases.secret)
792 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 result.append(resp)
793 result.append(resp)
794 return result
794 return result
795
795
    def local(self):
        """Return self: this repository is local (see also cancopy())."""
        return self
798
798
    def publishing(self):
        """True if this repo publishes changesets (phases.publish config)."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
803
803
    def cancopy(self):
        """True if this repo may be copied (hardlink-cloned) directly."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
812
812
813 def shared(self):
813 def shared(self):
814 '''the type of shared repository (None if not shared)'''
814 '''the type of shared repository (None if not shared)'''
815 if self.sharedpath != self.path:
815 if self.sharedpath != self.path:
816 return 'store'
816 return 'store'
817 return None
817 return None
818
818
    def join(self, f, *insidef):
        """Join path components and resolve them relative to self.vfs."""
        return self.vfs.join(os.path.join(f, *insidef))
821
821
    def wjoin(self, f, *insidef):
        """Join path components relative to the working directory root."""
        return self.vfs.reljoin(self.root, f, *insidef)
824
824
    def file(self, f):
        """Return the filelog for tracked file ``f``.

        A leading '/' is stripped; note an empty name raises IndexError.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
829
829
    def changectx(self, changeid):
        """Return the context for ``changeid`` (alias for repo[changeid])."""
        return self[changeid]
832
832
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None means the working directory's parents
        return self[changeid].parents()
836
836
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, adjusting copy records.

        ``copies`` returned by dirstate.setparents are re-applied or
        dropped based on the new first parent's manifest.
        """
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
853
853
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
858
858
    def getcwd(self):
        """Return the current working directory (delegates to dirstate)."""
        return self.dirstate.getcwd()
861
861
    def pathto(self, f, cwd=None):
        """Return path to ``f`` relative to ``cwd`` (delegates to dirstate)."""
        return self.dirstate.pathto(f, cwd)
864
864
    def wfile(self, f, mode='r'):
        """Open file ``f`` in the working directory with ``mode``."""
        return self.wvfs(f, mode)
867
867
    def _link(self, f):
        """True if working-directory file ``f`` is a symlink."""
        return self.wvfs.islink(f)
870
870
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for ``filter``.

        ``filter`` is a config section name ('encode'/'decode'); each entry
        maps a file pattern to a command or a registered data filter.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # command names a registered data filter; pass the
                        # remainder of the command line as its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running the command as a shell pipe
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
894
894
    def _filter(self, filterpats, filename, data):
        """Run ``data`` through the first filter whose pattern matches."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                # only the first matching filter is applied
                break

        return data
903
903
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory (wread)
        return self._loadfilter('encode')
907
907
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory (wwrite)
        return self._loadfilter('decode')
911
911
    def adddatafilter(self, name, filter):
        """Register a named data filter usable in encode/decode config."""
        self._datafilters[name] = filter
914
914
915 def wread(self, filename):
915 def wread(self, filename):
916 if self._link(filename):
916 if self._link(filename):
917 data = self.wvfs.readlink(filename)
917 data = self.wvfs.readlink(filename)
918 else:
918 else:
919 data = self.wvfs.read(filename)
919 data = self.wvfs.read(filename)
920 return self._filter(self._encodefilterpats, filename, data)
920 return self._filter(self._encodefilterpats, filename, data)
921
921
    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # 'l' flag: materialize as a symlink pointing at ``data``
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                # 'x' flag: mark via wvfs.setflags (link=False, exec=True)
                self.wvfs.setflags(filename, False, True)
        return len(data)
935
935
    def wwritedata(self, filename, data):
        """Apply the 'decode' filters to ``data`` without writing it."""
        return self._filter(self._decodefilterpats, filename, data)
938
938
939 def currenttransaction(self):
939 def currenttransaction(self):
940 """return the current transaction or None if non exists"""
940 """return the current transaction or None if non exists"""
941 if self._transref:
941 if self._transref:
942 tr = self._transref()
942 tr = self._transref()
943 else:
943 else:
944 tr = None
944 tr = None
945
945
946 if tr and tr.running():
946 if tr and tr.running():
947 return tr
947 return tr
948 return None
948 return None
949
949
    def transaction(self, desc, report=None):
        """Open (or nest into) a transaction described by ``desc``.

        ``report`` overrides the function used to report rollback output;
        defaults to ui.warn.  Returns the transaction object.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            # developer aid: transactions should be opened under the lock
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            # already in a transaction: nest instead of opening a new one
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # unique-ish id for correlating the hooks of this transaction
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr
1013
1013
1014 def _journalfiles(self):
1014 def _journalfiles(self):
1015 return ((self.svfs, 'journal'),
1015 return ((self.svfs, 'journal'),
1016 (self.vfs, 'journal.dirstate'),
1016 (self.vfs, 'journal.dirstate'),
1017 (self.vfs, 'journal.branch'),
1017 (self.vfs, 'journal.branch'),
1018 (self.vfs, 'journal.desc'),
1018 (self.vfs, 'journal.desc'),
1019 (self.vfs, 'journal.bookmarks'),
1019 (self.vfs, 'journal.bookmarks'),
1020 (self.svfs, 'journal.phaseroots'))
1020 (self.svfs, 'journal.phaseroots'))
1021
1021
def undofiles(self):
    """Return (vfs, name) pairs for the undo counterparts of the
    journal files (e.g. 'journal' -> 'undo')."""
    pairs = []
    for vfs, name in self._journalfiles():
        pairs.append((vfs, undoname(name)))
    return pairs
1024
1024
def _writejournal(self, desc):
    """Snapshot non-store repository state so a transaction can be
    rolled back.

    Copies dirstate, branch, bookmarks and phaseroots to their
    journal.* counterparts and records the pre-transaction changelog
    length together with *desc*.
    """
    # working-copy parent state
    self.vfs.write("journal.dirstate", self.vfs.tryread("dirstate"))
    # branch name is stored encoded, not in the local encoding
    branch = encoding.fromlocal(self.dirstate.branch())
    self.vfs.write("journal.branch", branch)
    # old changelog length plus a human-readable description
    self.vfs.write("journal.desc", "%d\n%s\n" % (len(self), desc))
    self.vfs.write("journal.bookmarks", self.vfs.tryread("bookmarks"))
    # phaseroots is a store file, hence svfs
    self.svfs.write("journal.phaseroots", self.svfs.tryread("phaseroots"))
1036
1036
def recover(self):
    """Roll back an interrupted transaction, if any.

    Returns True when a journal file was found and rolled back,
    False when there was nothing to recover.
    """
    lock = self.lock()
    try:
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        # 'plain' entries resolve against .hg/, the default ('')
        # against the store
        vfsmap = {'': self.svfs,
                  'plain': self.vfs,}
        transaction.rollback(self.svfs, vfsmap, "journal",
                             self.ui.warn)
        self.invalidate()
        return True
    finally:
        lock.release()
1053
1053
def rollback(self, dryrun=False, force=False):
    """Undo the effects of the last transaction recorded in 'undo'.

    Returns 0 on success and 1 when no rollback information exists.
    Both locks are taken (wlock before lock, per the documented
    ordering) since rollback touches store and working-copy state.
    """
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if not self.svfs.exists("undo"):
            self.ui.warn(_("no rollback information available\n"))
            return 1
        return self._rollback(dryrun, force)
    finally:
        release(lock, wlock)
1066
1066
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force):
    """Implementation of rollback(): undo the last recorded transaction.

    Reads undo.desc for the old changelog length and a description of
    what is being undone, refuses (without *force*) to roll back a
    commit that is not the working directory parent, restores the
    journaled files, and invalidates caches.  Returns 0 on success.
    """
    ui = self.ui
    try:
        # undo.desc format: "<oldlen>\n<desc>\n[<detail>\n]"
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        # missing/unreadable undo.desc: we can still roll back, we
        # just cannot describe (or safety-check) what we are undoing
        msg = _('rolling back unknown transaction\n')
        desc = None

    # rolling back a commit that is not the current working directory
    # parent would silently discard that commit's changes
    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise util.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    # capture dirstate parents before the store is rewound
    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks')
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots')
    self.invalidate()

    # only restore the dirstate when the rollback stripped one of the
    # working directory parents out of the changelog
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        self.vfs.rename('undo.dirstate', 'dirstate')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # any in-progress merge is now meaningless; reset its state
        ms = mergemod.mergestate(self)
        ms.reset(self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1136
1136
def invalidatecaches(self):
    """Drop the in-memory tag, branch-map and volatile-set caches."""
    # delattr does not work on the repoview proxy, so operate on the
    # instance dict directly; no-op when the cache was never filled
    self.__dict__.pop('_tagscache', None)
    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
1145
1145
def invalidatevolatilesets(self):
    """Drop caches derived from volatile data (filtering, obsolescence)."""
    # filtered repoviews are computed from obsolescence data, so both
    # caches must be cleared together
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
1149
1149
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() that it doesn't always
    rereads the dirstate. Use dirstate.invalidate() if you want to
    explicitly read the dirstate again (i.e. restoring it to a previous
    known good state).'''
    if not hasunfilteredcache(self, 'dirstate'):
        # dirstate was never loaded: nothing cached to drop
        return
    # drop every cached attribute of the dirstate first, then the
    # dirstate property itself — always on the unfiltered repo,
    # where all file caches live
    for name in self.dirstate._filecache:
        try:
            delattr(self.dirstate, name)
        except AttributeError:
            pass
    delattr(self.unfiltered(), 'dirstate')
1166
1166
def invalidate(self):
    """Drop all cached store data so it is reread from disk.

    The dirstate is intentionally skipped; it has its own path via
    invalidatedirstate().
    """
    repo = self.unfiltered()  # all file caches are stored unfiltered
    for name in self._filecache:
        if name == 'dirstate':
            continue
        try:
            delattr(repo, name)
        except AttributeError:
            # attribute was never accessed, hence never cached
            pass
    self.invalidatecaches()
    self.store.invalidatecaches()
1180
1180
def invalidateall(self):
    '''Fully invalidates both store and non-store parts, causing the
    subsequent operation to reread any outside changes.'''
    # extensions hook these two methods to flush their own caches
    self.invalidate()
    self.invalidatedirstate()
1187
1187
def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
    """Acquire *lockname* in *vfs* and return the lock object.

    Tries a non-blocking acquisition first; when the lock is already
    held and *wait* is true, warns the user and retries with the
    configured timeout (ui.timeout, default 600 seconds).  *releasefn*
    runs when the lock is released, *acquirefn* right after a
    successful acquisition; *desc* is used in user-facing messages.
    """
    try:
        # timeout of 0: fail immediately instead of blocking
        l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
    except error.LockHeld as inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lockmod.lock(vfs, lockname,
                         int(self.ui.config("ui", "timeout", "600")),
                         releasefn, desc=desc)
        self.ui.warn(_("got lock after %s seconds\n") % l.delay)
    if acquirefn:
        acquirefn()
    return l
1204
1204
1205 def _afterlock(self, callback):
1205 def _afterlock(self, callback):
1206 """add a callback to be run when the repository is fully unlocked
1206 """add a callback to be run when the repository is fully unlocked
1207
1207
1208 The callback will be executed when the outermost lock is released
1208 The callback will be executed when the outermost lock is released
1209 (with wlock being higher level than 'lock')."""
1209 (with wlock being higher level than 'lock')."""
1210 for ref in (self._wlockref, self._lockref):
1210 for ref in (self._wlockref, self._lockref):
1211 l = ref and ref()
1211 l = ref and ref()
1212 if l and l.held:
1212 if l and l.held:
1213 l.postrelease.append(callback)
1213 l.postrelease.append(callback)
1214 break
1214 break
1215 else: # no lock have been found.
1215 else: # no lock have been found.
1216 callback()
1216 callback()
1217
1217
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    existing = self._lockref and self._lockref()
    if existing is not None and existing.held:
        # reuse the already-held lock, bumping its acquisition depth
        existing.lock()
        return existing

    def unlock():
        # refresh file cache entries populated while the lock was
        # held; dirstate has its own invalidation path
        for name, entry in self._filecache.items():
            if name == 'dirstate' or name not in self.__dict__:
                continue
            entry.refresh()

    newlock = self._lock(self.svfs, "lock", wait, unlock,
                         self.invalidate, _('repository %s') % self.origroot)
    self._lockref = weakref.ref(newlock)
    return newlock
1240
1240
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Use this before modifying files in .hg.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    l = self._wlockref and self._wlockref()
    if l is not None and l.held:
        # re-entrant acquisition: bump the existing lock's depth
        l.lock()
        return l

    # We do not need to check for non-waiting lock aquisition. Such
    # acquisition would not cause dead-lock as they would just fail.
    if wait and (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # taking wlock while holding lock inverts the documented
            # ordering and can dead-lock against another process
            self.ui.develwarn('"wlock" acquired after "lock"')

    def unlock():
        # on release, persist (or discard, if a parent change is
        # pending) dirstate changes, then refresh the file cache
        # entry so the next read sees the current on-disk state
        if self.dirstate.pendingparentchange():
            self.dirstate.invalidate()
        else:
            self.dirstate.write()

        self._filecache['dirstate'].refresh()

    l = self._lock(self.vfs, "wlock", wait, unlock,
                   self.invalidatedirstate, _('working directory of %s') %
                   self.origroot)
    self._wlockref = weakref.ref(l)
    return l
1275
1275
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    Returns the filelog node for the (possibly reused) file revision.
    *manifest1*/*manifest2* are the parents' manifests, *linkrev* the
    changelog revision being committed, *tr* the open transaction,
    and *changelist* is appended to when the file actually changed.
    """

    fname = fctx.path()
    # file parents come from the manifests; nullid when absent
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    if isinstance(fctx, context.filectx):
        # an already-committed filectx can reuse its filelog entry
        # when its node is one of the intended parents
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3 rev1 changes file foo
        # \ / rev2 renames foo to bar and changes it
        # \- 2 -/ rev3 should have bar with all changes and
        # should record that bar descends from
        # bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3 rev4 reverts the content change from rev2
        # \ / merging rev3 and rev4 should use bar@rev2
        # \- 2 --- 4 as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        # file is new on p1's side: record p2 (if any) as sole parent
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    # unchanged: keep pointing at the p1 file revision
    return fparent1
1361
1361
1362 @unfilteredmethod
1362 @unfilteredmethod
1363 def commit(self, text="", user=None, date=None, match=None, force=False,
1363 def commit(self, text="", user=None, date=None, match=None, force=False,
1364 editor=False, extra={}):
1364 editor=False, extra={}):
1365 """Add a new revision to current repository.
1365 """Add a new revision to current repository.
1366
1366
1367 Revision information is gathered from the working directory,
1367 Revision information is gathered from the working directory,
1368 match can be used to filter the committed files. If editor is
1368 match can be used to filter the committed files. If editor is
1369 supplied, it is called to get a commit message.
1369 supplied, it is called to get a commit message.
1370 """
1370 """
1371
1371
1372 def fail(f, msg):
1372 def fail(f, msg):
1373 raise util.Abort('%s: %s' % (f, msg))
1373 raise util.Abort('%s: %s' % (f, msg))
1374
1374
1375 if not match:
1375 if not match:
1376 match = matchmod.always(self.root, '')
1376 match = matchmod.always(self.root, '')
1377
1377
1378 if not force:
1378 if not force:
1379 vdirs = []
1379 vdirs = []
1380 match.explicitdir = vdirs.append
1380 match.explicitdir = vdirs.append
1381 match.bad = fail
1381 match.bad = fail
1382
1382
1383 wlock = self.wlock()
1383 wlock = self.wlock()
1384 try:
1384 try:
1385 wctx = self[None]
1385 wctx = self[None]
1386 merge = len(wctx.parents()) > 1
1386 merge = len(wctx.parents()) > 1
1387
1387
1388 if not force and merge and match.ispartial():
1388 if not force and merge and match.ispartial():
1389 raise util.Abort(_('cannot partially commit a merge '
1389 raise util.Abort(_('cannot partially commit a merge '
1390 '(do not specify files or patterns)'))
1390 '(do not specify files or patterns)'))
1391
1391
1392 status = self.status(match=match, clean=force)
1392 status = self.status(match=match, clean=force)
1393 if force:
1393 if force:
1394 status.modified.extend(status.clean) # mq may commit clean files
1394 status.modified.extend(status.clean) # mq may commit clean files
1395
1395
1396 # check subrepos
1396 # check subrepos
1397 subs = []
1397 subs = []
1398 commitsubs = set()
1398 commitsubs = set()
1399 newstate = wctx.substate.copy()
1399 newstate = wctx.substate.copy()
1400 # only manage subrepos and .hgsubstate if .hgsub is present
1400 # only manage subrepos and .hgsubstate if .hgsub is present
1401 if '.hgsub' in wctx:
1401 if '.hgsub' in wctx:
1402 # we'll decide whether to track this ourselves, thanks
1402 # we'll decide whether to track this ourselves, thanks
1403 for c in status.modified, status.added, status.removed:
1403 for c in status.modified, status.added, status.removed:
1404 if '.hgsubstate' in c:
1404 if '.hgsubstate' in c:
1405 c.remove('.hgsubstate')
1405 c.remove('.hgsubstate')
1406
1406
1407 # compare current state to last committed state
1407 # compare current state to last committed state
1408 # build new substate based on last committed state
1408 # build new substate based on last committed state
1409 oldstate = wctx.p1().substate
1409 oldstate = wctx.p1().substate
1410 for s in sorted(newstate.keys()):
1410 for s in sorted(newstate.keys()):
1411 if not match(s):
1411 if not match(s):
1412 # ignore working copy, use old state if present
1412 # ignore working copy, use old state if present
1413 if s in oldstate:
1413 if s in oldstate:
1414 newstate[s] = oldstate[s]
1414 newstate[s] = oldstate[s]
1415 continue
1415 continue
1416 if not force:
1416 if not force:
1417 raise util.Abort(
1417 raise util.Abort(
1418 _("commit with new subrepo %s excluded") % s)
1418 _("commit with new subrepo %s excluded") % s)
1419 dirtyreason = wctx.sub(s).dirtyreason(True)
1419 dirtyreason = wctx.sub(s).dirtyreason(True)
1420 if dirtyreason:
1420 if dirtyreason:
1421 if not self.ui.configbool('ui', 'commitsubrepos'):
1421 if not self.ui.configbool('ui', 'commitsubrepos'):
1422 raise util.Abort(dirtyreason,
1422 raise util.Abort(dirtyreason,
1423 hint=_("use --subrepos for recursive commit"))
1423 hint=_("use --subrepos for recursive commit"))
1424 subs.append(s)
1424 subs.append(s)
1425 commitsubs.add(s)
1425 commitsubs.add(s)
1426 else:
1426 else:
1427 bs = wctx.sub(s).basestate()
1427 bs = wctx.sub(s).basestate()
1428 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1428 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1429 if oldstate.get(s, (None, None, None))[1] != bs:
1429 if oldstate.get(s, (None, None, None))[1] != bs:
1430 subs.append(s)
1430 subs.append(s)
1431
1431
1432 # check for removed subrepos
1432 # check for removed subrepos
1433 for p in wctx.parents():
1433 for p in wctx.parents():
1434 r = [s for s in p.substate if s not in newstate]
1434 r = [s for s in p.substate if s not in newstate]
1435 subs += [s for s in r if match(s)]
1435 subs += [s for s in r if match(s)]
1436 if subs:
1436 if subs:
1437 if (not match('.hgsub') and
1437 if (not match('.hgsub') and
1438 '.hgsub' in (wctx.modified() + wctx.added())):
1438 '.hgsub' in (wctx.modified() + wctx.added())):
1439 raise util.Abort(
1439 raise util.Abort(
1440 _("can't commit subrepos without .hgsub"))
1440 _("can't commit subrepos without .hgsub"))
1441 status.modified.insert(0, '.hgsubstate')
1441 status.modified.insert(0, '.hgsubstate')
1442
1442
1443 elif '.hgsub' in status.removed:
1443 elif '.hgsub' in status.removed:
1444 # clean up .hgsubstate when .hgsub is removed
1444 # clean up .hgsubstate when .hgsub is removed
1445 if ('.hgsubstate' in wctx and
1445 if ('.hgsubstate' in wctx and
1446 '.hgsubstate' not in (status.modified + status.added +
1446 '.hgsubstate' not in (status.modified + status.added +
1447 status.removed)):
1447 status.removed)):
1448 status.removed.insert(0, '.hgsubstate')
1448 status.removed.insert(0, '.hgsubstate')
1449
1449
1450 # make sure all explicit patterns are matched
1450 # make sure all explicit patterns are matched
1451 if not force and (match.isexact() or match.prefix()):
1451 if not force and (match.isexact() or match.prefix()):
1452 matched = set(status.modified + status.added + status.removed)
1452 matched = set(status.modified + status.added + status.removed)
1453
1453
1454 for f in match.files():
1454 for f in match.files():
1455 f = self.dirstate.normalize(f)
1455 f = self.dirstate.normalize(f)
1456 if f == '.' or f in matched or f in wctx.substate:
1456 if f == '.' or f in matched or f in wctx.substate:
1457 continue
1457 continue
1458 if f in status.deleted:
1458 if f in status.deleted:
1459 fail(f, _('file not found!'))
1459 fail(f, _('file not found!'))
1460 if f in vdirs: # visited directory
1460 if f in vdirs: # visited directory
1461 d = f + '/'
1461 d = f + '/'
1462 for mf in matched:
1462 for mf in matched:
1463 if mf.startswith(d):
1463 if mf.startswith(d):
1464 break
1464 break
1465 else:
1465 else:
1466 fail(f, _("no match under directory!"))
1466 fail(f, _("no match under directory!"))
1467 elif f not in self.dirstate:
1467 elif f not in self.dirstate:
1468 fail(f, _("file not tracked!"))
1468 fail(f, _("file not tracked!"))
1469
1469
1470 cctx = context.workingcommitctx(self, status,
1470 cctx = context.workingcommitctx(self, status,
1471 text, user, date, extra)
1471 text, user, date, extra)
1472
1472
1473 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1473 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1474 or extra.get('close') or merge or cctx.files()
1474 or extra.get('close') or merge or cctx.files()
1475 or self.ui.configbool('ui', 'allowemptycommit'))
1475 or self.ui.configbool('ui', 'allowemptycommit'))
1476 if not allowemptycommit:
1476 if not allowemptycommit:
1477 return None
1477 return None
1478
1478
1479 if merge and cctx.deleted():
1479 if merge and cctx.deleted():
1480 raise util.Abort(_("cannot commit merge with missing files"))
1480 raise util.Abort(_("cannot commit merge with missing files"))
1481
1481
1482 ms = mergemod.mergestate(self)
1482 ms = mergemod.mergestate(self)
1483 for f in status.modified:
1483 for f in status.modified:
1484 if f in ms and ms[f] == 'u':
1484 if f in ms and ms[f] == 'u':
1485 raise util.Abort(_('unresolved merge conflicts '
1485 raise util.Abort(_('unresolved merge conflicts '
1486 '(see "hg help resolve")'))
1486 '(see "hg help resolve")'))
1487
1487
1488 if editor:
1488 if editor:
1489 cctx._text = editor(self, cctx, subs)
1489 cctx._text = editor(self, cctx, subs)
1490 edited = (text != cctx._text)
1490 edited = (text != cctx._text)
1491
1491
1492 # Save commit message in case this transaction gets rolled back
1492 # Save commit message in case this transaction gets rolled back
1493 # (e.g. by a pretxncommit hook). Leave the content alone on
1493 # (e.g. by a pretxncommit hook). Leave the content alone on
1494 # the assumption that the user will use the same editor again.
1494 # the assumption that the user will use the same editor again.
1495 msgfn = self.savecommitmessage(cctx._text)
1495 msgfn = self.savecommitmessage(cctx._text)
1496
1496
1497 # commit subs and write new state
1497 # commit subs and write new state
1498 if subs:
1498 if subs:
1499 for s in sorted(commitsubs):
1499 for s in sorted(commitsubs):
1500 sub = wctx.sub(s)
1500 sub = wctx.sub(s)
1501 self.ui.status(_('committing subrepository %s\n') %
1501 self.ui.status(_('committing subrepository %s\n') %
1502 subrepo.subrelpath(sub))
1502 subrepo.subrelpath(sub))
1503 sr = sub.commit(cctx._text, user, date)
1503 sr = sub.commit(cctx._text, user, date)
1504 newstate[s] = (newstate[s][0], sr)
1504 newstate[s] = (newstate[s][0], sr)
1505 subrepo.writestate(self, newstate)
1505 subrepo.writestate(self, newstate)
1506
1506
1507 p1, p2 = self.dirstate.parents()
1507 p1, p2 = self.dirstate.parents()
1508 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1508 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1509 try:
1509 try:
1510 self.hook("precommit", throw=True, parent1=hookp1,
1510 self.hook("precommit", throw=True, parent1=hookp1,
1511 parent2=hookp2)
1511 parent2=hookp2)
1512 ret = self.commitctx(cctx, True)
1512 ret = self.commitctx(cctx, True)
1513 except: # re-raises
1513 except: # re-raises
1514 if edited:
1514 if edited:
1515 self.ui.write(
1515 self.ui.write(
1516 _('note: commit message saved in %s\n') % msgfn)
1516 _('note: commit message saved in %s\n') % msgfn)
1517 raise
1517 raise
1518
1518
1519 # update bookmarks, dirstate and mergestate
1519 # update bookmarks, dirstate and mergestate
1520 bookmarks.update(self, [p1, p2], ret)
1520 bookmarks.update(self, [p1, p2], ret)
1521 cctx.markcommitted(ret)
1521 cctx.markcommitted(ret)
1522 ms.reset()
1522 ms.reset()
1523 finally:
1523 finally:
1524 wlock.release()
1524 wlock.release()
1525
1525
1526 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1526 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1527 # hack for command that use a temporary commit (eg: histedit)
1527 # hack for command that use a temporary commit (eg: histedit)
1528 # temporary commit got stripped before hook release
1528 # temporary commit got stripped before hook release
1529 if self.changelog.hasnode(ret):
1529 if self.changelog.hasnode(ret):
1530 self.hook("commit", node=node, parent1=parent1,
1530 self.hook("commit", node=node, parent1=parent1,
1531 parent2=parent2)
1531 parent2=parent2)
1532 self._afterlock(commithook)
1532 self._afterlock(commithook)
1533 return ret
1533 return ret
1534
1534
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When 'error' is True, any IOError raised while committing a file
        is fatal; otherwise ENOENT IOErrors are tolerated (the file is
        skipped).  Returns the node id of the new changeset.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # weakref proxy avoids keeping the transaction alive through
            # references stored by the revlogs during the commit
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                # the new changeset is appended at the current changelog
                # end, so len(self) is its future linkrev
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # context reports the file as gone: record removal
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                # drop removals that were never in either parent manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changed: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # move the new commit into the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1621
1621
1622 @unfilteredmethod
1622 @unfilteredmethod
1623 def destroying(self):
1623 def destroying(self):
1624 '''Inform the repository that nodes are about to be destroyed.
1624 '''Inform the repository that nodes are about to be destroyed.
1625 Intended for use by strip and rollback, so there's a common
1625 Intended for use by strip and rollback, so there's a common
1626 place for anything that has to be done before destroying history.
1626 place for anything that has to be done before destroying history.
1627
1627
1628 This is mostly useful for saving state that is in memory and waiting
1628 This is mostly useful for saving state that is in memory and waiting
1629 to be flushed when the current lock is released. Because a call to
1629 to be flushed when the current lock is released. Because a call to
1630 destroyed is imminent, the repo will be invalidated causing those
1630 destroyed is imminent, the repo will be invalidated causing those
1631 changes to stay in memory (waiting for the next unlock), or vanish
1631 changes to stay in memory (waiting for the next unlock), or vanish
1632 completely.
1632 completely.
1633 '''
1633 '''
1634 # When using the same lock to commit and strip, the phasecache is left
1634 # When using the same lock to commit and strip, the phasecache is left
1635 # dirty after committing. Then when we strip, the repo is invalidated,
1635 # dirty after committing. Then when we strip, the repo is invalidated,
1636 # causing those changes to disappear.
1636 # causing those changes to disappear.
1637 if '_phasecache' in vars(self):
1637 if '_phasecache' in vars(self):
1638 self._phasecache.write()
1638 self._phasecache.write()
1639
1639
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # drop cached in-memory repository state so it is reloaded fresh
        self.invalidate()
1673
1673
1674 def walk(self, match, node=None):
1674 def walk(self, match, node=None):
1675 '''
1675 '''
1676 walk recursively through the directory tree or a given
1676 walk recursively through the directory tree or a given
1677 changeset, finding all files matched by the match
1677 changeset, finding all files matched by the match
1678 function
1678 function
1679 '''
1679 '''
1680 return self[node].walk(match)
1680 return self[node].walk(match)
1681
1681
1682 def status(self, node1='.', node2=None, match=None,
1682 def status(self, node1='.', node2=None, match=None,
1683 ignored=False, clean=False, unknown=False,
1683 ignored=False, clean=False, unknown=False,
1684 listsubrepos=False):
1684 listsubrepos=False):
1685 '''a convenience method that calls node1.status(node2)'''
1685 '''a convenience method that calls node1.status(node2)'''
1686 return self[node1].status(node2, match, ignored, clean, unknown,
1686 return self[node1].status(node2, match, ignored, clean, unknown,
1687 listsubrepos)
1687 listsubrepos)
1688
1688
1689 def heads(self, start=None):
1689 def heads(self, start=None):
1690 heads = self.changelog.heads(start)
1690 heads = self.changelog.heads(start)
1691 # sort the output in rev descending order
1691 # sort the output in rev descending order
1692 return sorted(heads, key=self.changelog.rev, reverse=True)
1692 return sorted(heads, key=self.changelog.rev, reverse=True)
1693
1693
1694 def branchheads(self, branch=None, start=None, closed=False):
1694 def branchheads(self, branch=None, start=None, closed=False):
1695 '''return a (possibly filtered) list of heads for the given branch
1695 '''return a (possibly filtered) list of heads for the given branch
1696
1696
1697 Heads are returned in topological order, from newest to oldest.
1697 Heads are returned in topological order, from newest to oldest.
1698 If branch is None, use the dirstate branch.
1698 If branch is None, use the dirstate branch.
1699 If start is not None, return only heads reachable from start.
1699 If start is not None, return only heads reachable from start.
1700 If closed is True, return heads that are marked as closed as well.
1700 If closed is True, return heads that are marked as closed as well.
1701 '''
1701 '''
1702 if branch is None:
1702 if branch is None:
1703 branch = self[None].branch()
1703 branch = self[None].branch()
1704 branches = self.branchmap()
1704 branches = self.branchmap()
1705 if branch not in branches:
1705 if branch not in branches:
1706 return []
1706 return []
1707 # the cache returns heads ordered lowest to highest
1707 # the cache returns heads ordered lowest to highest
1708 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1708 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1709 if start is not None:
1709 if start is not None:
1710 # filter out the heads that cannot be reached from startrev
1710 # filter out the heads that cannot be reached from startrev
1711 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1711 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1712 bheads = [h for h in bheads if h in fbheads]
1712 bheads = [h for h in bheads if h in fbheads]
1713 return bheads
1713 return bheads
1714
1714
1715 def branches(self, nodes):
1715 def branches(self, nodes):
1716 if not nodes:
1716 if not nodes:
1717 nodes = [self.changelog.tip()]
1717 nodes = [self.changelog.tip()]
1718 b = []
1718 b = []
1719 for n in nodes:
1719 for n in nodes:
1720 t = n
1720 t = n
1721 while True:
1721 while True:
1722 p = self.changelog.parents(n)
1722 p = self.changelog.parents(n)
1723 if p[1] != nullid or p[0] == nullid:
1723 if p[1] != nullid or p[0] == nullid:
1724 b.append((t, n, p[0], p[1]))
1724 b.append((t, n, p[0], p[1]))
1725 break
1725 break
1726 n = p[0]
1726 n = p[0]
1727 return b
1727 return b
1728
1728
1729 def between(self, pairs):
1729 def between(self, pairs):
1730 r = []
1730 r = []
1731
1731
1732 for top, bottom in pairs:
1732 for top, bottom in pairs:
1733 n, l, i = top, [], 0
1733 n, l, i = top, [], 0
1734 f = 1
1734 f = 1
1735
1735
1736 while n != bottom and n != nullid:
1736 while n != bottom and n != nullid:
1737 p = self.changelog.parents(n)[0]
1737 p = self.changelog.parents(n)[0]
1738 if i == f:
1738 if i == f:
1739 l.append(n)
1739 l.append(n)
1740 f = f * 2
1740 f = f * 2
1741 n = p
1741 n = p
1742 i += 1
1742 i += 1
1743
1743
1744 r.append(l)
1744 r.append(l)
1745
1745
1746 return r
1746 return r
1747
1747
1748 def checkpush(self, pushop):
1748 def checkpush(self, pushop):
1749 """Extensions can override this function if additional checks have
1749 """Extensions can override this function if additional checks have
1750 to be performed before pushing, or call it if they override push
1750 to be performed before pushing, or call it if they override push
1751 command.
1751 command.
1752 """
1752 """
1753 pass
1753 pass
1754
1754
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.

        The value is cached by the propertycache decorator, so hooks
        registered on it persist for the lifetime of the repo object.
        """
        return util.hooks()
1761
1761
    def stream_in(self, remote, remotereqs):
        """Stream a clone from 'remote' into this repository.

        'remotereqs' is the set of requirements needed to read the
        streamed data.  Returns len(self.heads()) + 1.
        """
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        # the first line of the stream is a numeric status code
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))

        self.applystreamclone(remotereqs, rbranchmap, fp)
        return len(self.heads()) + 1
1785
1785
    def applystreamclone(self, remotereqs, remotebranchmap, fp):
        """Apply stream clone data to this repository.

        "remotereqs" is a set of requirements to handle the incoming data.
        "remotebranchmap" is the result of a branchmap lookup on the remote. It
        can be None.
        "fp" is a file object containing the raw stream data, suitable for
        feeding into exchange.consumestreamclone.
        """
        lock = self.lock()
        try:
            exchange.consumestreamclone(self, fp)

            # new requirements = old non-format requirements +
            #                    new format-related remote requirements
            # requirements from the streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if remotebranchmap:
                # seed a local branch cache from the remote's branchmap,
                # collecting all remote heads and the closed ones
                rbheads = []
                closed = []
                for bheads in remotebranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    # the cache tip is the highest revision among the heads
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(remotebranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            # drop cached in-memory state so the streamed data is visible
            self.invalidate()
        finally:
            lock.release()
1836
1836
    # NOTE(review): 'heads' has a mutable default argument; harmless as
    # written because it is never mutated, but 'heads=None' would be safer.
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        # silence bookmark-move messages while pulling during the clone
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret
1876
1876
    def pushkey(self, namespace, key, old, new):
        """Set 'key' from 'old' to 'new' in the given pushkey namespace.

        The 'prepushkey' hook runs first; if it aborts, the error is
        reported and False is returned.  The 'pushkey' hook runs after
        the current lock is released.  Returns the pushkey.push result.
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                # expose pending transaction data to the hook
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
1902
1902
    def listkeys(self, namespace):
        """Return the pushkey key/value mapping for 'namespace'.

        Fires the 'prelistkeys' hook (which may abort) before the lookup
        and the 'listkeys' hook afterwards.
        """
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
1909
1909
1910 def debugwireargs(self, one, two, three=None, four=None, five=None):
1910 def debugwireargs(self, one, two, three=None, four=None, five=None):
1911 '''used to test argument passing over the wire'''
1911 '''used to test argument passing over the wire'''
1912 return "%s %s %s %s %s" % (one, two, three, four, five)
1912 return "%s %s %s %s %s" % (one, two, three, four, five)
1913
1913
1914 def savecommitmessage(self, text):
1914 def savecommitmessage(self, text):
1915 fp = self.vfs('last-message.txt', 'wb')
1915 fp = self.vfs('last-message.txt', 'wb')
1916 try:
1916 try:
1917 fp.write(text)
1917 fp.write(text)
1918 finally:
1918 finally:
1919 fp.close()
1919 fp.close()
1920 return self.pathto(fp.name[len(self.root) + 1:])
1920 return self.pathto(fp.name[len(self.root) + 1:])
1921
1921
1922 # used to avoid circular references so destructors work
1922 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a closure that performs the renames described by 'files'.

    'files' is an iterable of (vfs, src, dest) triples; each is copied
    into a fresh tuple up front so the closure holds no reference back
    into the caller's data (avoids circular references so destructors
    work).
    """
    pending = [tuple(t) for t in files]
    def renamer():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:
                # the journal file does not exist yet; nothing to rename
                pass
    return renamer
1932
1932
def undoname(fn):
    """Return the 'undo.*' path corresponding to a 'journal.*' file 'fn'.

    Raises AssertionError if 'fn' does not name a journal file.
    """
    base, name = os.path.split(fn)
    # explicit check instead of 'assert': assert statements are stripped
    # when running under 'python -O', silently disabling the validation
    if not name.startswith('journal'):
        raise AssertionError('not a journal file: %r' % fn)
    return os.path.join(base, name.replace('journal', 'undo', 1))
1937
1937
def instance(ui, path, create):
    """Module entry point: open (or create) the local repository at 'path'."""
    return localrepository(ui, util.urllocalpath(path), create)
1940
1940
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now