transaction: separate calculating TXNID from creating transaction object...
FUJIWARA Katsunori
r25267:69c5cab0 stable
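
Only one line changes in the excerpt below: the top-level import list gains `random`. Read together with the commit summary, the refactoring computes the transaction ID (TXNID) as a standalone step, so the ID exists before the transaction object is constructed and can be handed to hooks. The following is a minimal sketch of that shape, not the commit's actual code: it assumes the ID is derived from `random.random()` and `time.time()` (which would explain the new import), and the helper name `maketxnid` and the `hashlib.sha1` call are illustrative stand-ins.

import hashlib
import random
import time

def maketxnid():
    # Compute a unique transaction id up front, before any transaction
    # object exists, so it can be passed to hooks such as 'pretxnopen'.
    # Mixing random.random() with the current time keeps the id unique
    # even when two transactions open within the same clock tick.
    # (Assumed derivation; the exact formula is not shown in this hunk.)
    idbase = "%.40f#%f" % (random.random(), time.time())
    return 'TXN:' + hashlib.sha1(idbase.encode('ascii')).hexdigest()

# Hypothetical call site: announce the id first, then build the object.
txnid = maketxnid()
print(txnid)  # e.g. TXN:9a3e7c... (40 hex digits)
# self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
# tr = transaction.transaction(..., txnid=txnid)  # created afterwards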
@@ -1,1972 +1,1973 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect, random
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception, exc:
138 except Exception, exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced, exc:
155 except error.PushRaced, exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 'dotencode'))
197 'dotencode'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 requirements = ['revlogv1']
199 requirements = ['revlogv1']
200 filtername = None
200 filtername = None
201
201
202 # a list of (ui, featureset) functions.
202 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
203 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
204 featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return self.requirements[:]
207 return self.requirements[:]
208
208
209 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wopener = self.wvfs
211 self.wopener = self.wvfs
212 self.root = self.wvfs.base
212 self.root = self.wvfs.base
213 self.path = self.wvfs.join(".hg")
213 self.path = self.wvfs.join(".hg")
214 self.origroot = path
214 self.origroot = path
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.vfs = scmutil.vfs(self.path)
216 self.vfs = scmutil.vfs(self.path)
217 self.opener = self.vfs
217 self.opener = self.vfs
218 self.baseui = baseui
218 self.baseui = baseui
219 self.ui = baseui.copy()
219 self.ui = baseui.copy()
220 self.ui.copy = baseui.copy # prevent copying repo configuration
220 self.ui.copy = baseui.copy # prevent copying repo configuration
221 # A list of callback to shape the phase if no data were found.
221 # A list of callback to shape the phase if no data were found.
222 # Callback are in the form: func(repo, roots) --> processed root.
222 # Callback are in the form: func(repo, roots) --> processed root.
223 # This list it to be filled by extension during repo setup
223 # This list it to be filled by extension during repo setup
224 self._phasedefaults = []
224 self._phasedefaults = []
225 try:
225 try:
226 self.ui.readconfig(self.join("hgrc"), self.root)
226 self.ui.readconfig(self.join("hgrc"), self.root)
227 extensions.loadall(self.ui)
227 extensions.loadall(self.ui)
228 except IOError:
228 except IOError:
229 pass
229 pass
230
230
231 if self.featuresetupfuncs:
231 if self.featuresetupfuncs:
232 self.supported = set(self._basesupported) # use private copy
232 self.supported = set(self._basesupported) # use private copy
233 extmods = set(m.__name__ for n, m
233 extmods = set(m.__name__ for n, m
234 in extensions.extensions(self.ui))
234 in extensions.extensions(self.ui))
235 for setupfunc in self.featuresetupfuncs:
235 for setupfunc in self.featuresetupfuncs:
236 if setupfunc.__module__ in extmods:
236 if setupfunc.__module__ in extmods:
237 setupfunc(self.ui, self.supported)
237 setupfunc(self.ui, self.supported)
238 else:
238 else:
239 self.supported = self._basesupported
239 self.supported = self._basesupported
240
240
241 if not self.vfs.isdir():
241 if not self.vfs.isdir():
242 if create:
242 if create:
243 if not self.wvfs.exists():
243 if not self.wvfs.exists():
244 self.wvfs.makedirs()
244 self.wvfs.makedirs()
245 self.vfs.makedir(notindexed=True)
245 self.vfs.makedir(notindexed=True)
246 requirements = self._baserequirements(create)
246 requirements = self._baserequirements(create)
247 if self.ui.configbool('format', 'usestore', True):
247 if self.ui.configbool('format', 'usestore', True):
248 self.vfs.mkdir("store")
248 self.vfs.mkdir("store")
249 requirements.append("store")
249 requirements.append("store")
250 if self.ui.configbool('format', 'usefncache', True):
250 if self.ui.configbool('format', 'usefncache', True):
251 requirements.append("fncache")
251 requirements.append("fncache")
252 if self.ui.configbool('format', 'dotencode', True):
252 if self.ui.configbool('format', 'dotencode', True):
253 requirements.append('dotencode')
253 requirements.append('dotencode')
254 # create an invalid changelog
254 # create an invalid changelog
255 self.vfs.append(
255 self.vfs.append(
256 "00changelog.i",
256 "00changelog.i",
257 '\0\0\0\2' # represents revlogv2
257 '\0\0\0\2' # represents revlogv2
258 ' dummy changelog to prevent using the old repo layout'
258 ' dummy changelog to prevent using the old repo layout'
259 )
259 )
260 if self.ui.configbool('format', 'generaldelta', False):
260 if self.ui.configbool('format', 'generaldelta', False):
261 requirements.append("generaldelta")
261 requirements.append("generaldelta")
262 if self.ui.configbool('experimental', 'manifestv2', False):
262 if self.ui.configbool('experimental', 'manifestv2', False):
263 requirements.append("manifestv2")
263 requirements.append("manifestv2")
264 requirements = set(requirements)
264 requirements = set(requirements)
265 else:
265 else:
266 raise error.RepoError(_("repository %s not found") % path)
266 raise error.RepoError(_("repository %s not found") % path)
267 elif create:
267 elif create:
268 raise error.RepoError(_("repository %s already exists") % path)
268 raise error.RepoError(_("repository %s already exists") % path)
269 else:
269 else:
270 try:
270 try:
271 requirements = scmutil.readrequires(self.vfs, self.supported)
271 requirements = scmutil.readrequires(self.vfs, self.supported)
272 except IOError, inst:
272 except IOError, inst:
273 if inst.errno != errno.ENOENT:
273 if inst.errno != errno.ENOENT:
274 raise
274 raise
275 requirements = set()
275 requirements = set()
276
276
277 self.sharedpath = self.path
277 self.sharedpath = self.path
278 try:
278 try:
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
280 realpath=True)
280 realpath=True)
281 s = vfs.base
281 s = vfs.base
282 if not vfs.exists():
282 if not vfs.exists():
283 raise error.RepoError(
283 raise error.RepoError(
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
285 self.sharedpath = s
285 self.sharedpath = s
286 except IOError, inst:
286 except IOError, inst:
287 if inst.errno != errno.ENOENT:
287 if inst.errno != errno.ENOENT:
288 raise
288 raise
289
289
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
291 self.spath = self.store.path
291 self.spath = self.store.path
292 self.svfs = self.store.vfs
292 self.svfs = self.store.vfs
293 self.sopener = self.svfs
293 self.sopener = self.svfs
294 self.sjoin = self.store.join
294 self.sjoin = self.store.join
295 self.vfs.createmode = self.store.createmode
295 self.vfs.createmode = self.store.createmode
296 self._applyrequirements(requirements)
296 self._applyrequirements(requirements)
297 if create:
297 if create:
298 self._writerequirements()
298 self._writerequirements()
299
299
300
300
301 self._branchcaches = {}
301 self._branchcaches = {}
302 self._revbranchcache = None
302 self._revbranchcache = None
303 self.filterpats = {}
303 self.filterpats = {}
304 self._datafilters = {}
304 self._datafilters = {}
305 self._transref = self._lockref = self._wlockref = None
305 self._transref = self._lockref = self._wlockref = None
306
306
307 # A cache for various files under .hg/ that tracks file changes,
307 # A cache for various files under .hg/ that tracks file changes,
308 # (used by the filecache decorator)
308 # (used by the filecache decorator)
309 #
309 #
310 # Maps a property name to its util.filecacheentry
310 # Maps a property name to its util.filecacheentry
311 self._filecache = {}
311 self._filecache = {}
312
312
313 # hold sets of revision to be filtered
313 # hold sets of revision to be filtered
314 # should be cleared when something might have changed the filter value:
314 # should be cleared when something might have changed the filter value:
315 # - new changesets,
315 # - new changesets,
316 # - phase change,
316 # - phase change,
317 # - new obsolescence marker,
317 # - new obsolescence marker,
318 # - working directory parent change,
318 # - working directory parent change,
319 # - bookmark changes
319 # - bookmark changes
320 self.filteredrevcache = {}
320 self.filteredrevcache = {}
321
321
322 # generic mapping between names and nodes
322 # generic mapping between names and nodes
323 self.names = namespaces.namespaces()
323 self.names = namespaces.namespaces()
324
324
325 def close(self):
325 def close(self):
326 self._writecaches()
326 self._writecaches()
327
327
328 def _writecaches(self):
328 def _writecaches(self):
329 if self._revbranchcache:
329 if self._revbranchcache:
330 self._revbranchcache.write()
330 self._revbranchcache.write()
331
331
332 def _restrictcapabilities(self, caps):
332 def _restrictcapabilities(self, caps):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
334 caps = set(caps)
334 caps = set(caps)
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
336 caps.add('bundle2=' + urllib.quote(capsblob))
336 caps.add('bundle2=' + urllib.quote(capsblob))
337 return caps
337 return caps
338
338
339 def _applyrequirements(self, requirements):
339 def _applyrequirements(self, requirements):
340 self.requirements = requirements
340 self.requirements = requirements
341 self.svfs.options = dict((r, 1) for r in requirements
341 self.svfs.options = dict((r, 1) for r in requirements
342 if r in self.openerreqs)
342 if r in self.openerreqs)
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
344 if chunkcachesize is not None:
344 if chunkcachesize is not None:
345 self.svfs.options['chunkcachesize'] = chunkcachesize
345 self.svfs.options['chunkcachesize'] = chunkcachesize
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
347 if maxchainlen is not None:
347 if maxchainlen is not None:
348 self.svfs.options['maxchainlen'] = maxchainlen
348 self.svfs.options['maxchainlen'] = maxchainlen
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
350 if manifestcachesize is not None:
350 if manifestcachesize is not None:
351 self.svfs.options['manifestcachesize'] = manifestcachesize
351 self.svfs.options['manifestcachesize'] = manifestcachesize
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
353 if usetreemanifest is not None:
353 if usetreemanifest is not None:
354 self.svfs.options['usetreemanifest'] = usetreemanifest
354 self.svfs.options['usetreemanifest'] = usetreemanifest
355
355
356 def _writerequirements(self):
356 def _writerequirements(self):
357 reqfile = self.vfs("requires", "w")
357 reqfile = self.vfs("requires", "w")
358 for r in sorted(self.requirements):
358 for r in sorted(self.requirements):
359 reqfile.write("%s\n" % r)
359 reqfile.write("%s\n" % r)
360 reqfile.close()
360 reqfile.close()
361
361
362 def _checknested(self, path):
362 def _checknested(self, path):
363 """Determine if path is a legal nested repository."""
363 """Determine if path is a legal nested repository."""
364 if not path.startswith(self.root):
364 if not path.startswith(self.root):
365 return False
365 return False
366 subpath = path[len(self.root) + 1:]
366 subpath = path[len(self.root) + 1:]
367 normsubpath = util.pconvert(subpath)
367 normsubpath = util.pconvert(subpath)
368
368
369 # XXX: Checking against the current working copy is wrong in
369 # XXX: Checking against the current working copy is wrong in
370 # the sense that it can reject things like
370 # the sense that it can reject things like
371 #
371 #
372 # $ hg cat -r 10 sub/x.txt
372 # $ hg cat -r 10 sub/x.txt
373 #
373 #
374 # if sub/ is no longer a subrepository in the working copy
374 # if sub/ is no longer a subrepository in the working copy
375 # parent revision.
375 # parent revision.
376 #
376 #
377 # However, it can of course also allow things that would have
377 # However, it can of course also allow things that would have
378 # been rejected before, such as the above cat command if sub/
378 # been rejected before, such as the above cat command if sub/
379 # is a subrepository now, but was a normal directory before.
379 # is a subrepository now, but was a normal directory before.
380 # The old path auditor would have rejected by mistake since it
380 # The old path auditor would have rejected by mistake since it
381 # panics when it sees sub/.hg/.
381 # panics when it sees sub/.hg/.
382 #
382 #
383 # All in all, checking against the working copy seems sensible
383 # All in all, checking against the working copy seems sensible
384 # since we want to prevent access to nested repositories on
384 # since we want to prevent access to nested repositories on
385 # the filesystem *now*.
385 # the filesystem *now*.
386 ctx = self[None]
386 ctx = self[None]
387 parts = util.splitpath(subpath)
387 parts = util.splitpath(subpath)
388 while parts:
388 while parts:
389 prefix = '/'.join(parts)
389 prefix = '/'.join(parts)
390 if prefix in ctx.substate:
390 if prefix in ctx.substate:
391 if prefix == normsubpath:
391 if prefix == normsubpath:
392 return True
392 return True
393 else:
393 else:
394 sub = ctx.sub(prefix)
394 sub = ctx.sub(prefix)
395 return sub.checknested(subpath[len(prefix) + 1:])
395 return sub.checknested(subpath[len(prefix) + 1:])
396 else:
396 else:
397 parts.pop()
397 parts.pop()
398 return False
398 return False
399
399
400 def peer(self):
400 def peer(self):
401 return localpeer(self) # not cached to avoid reference cycle
401 return localpeer(self) # not cached to avoid reference cycle
402
402
403 def unfiltered(self):
403 def unfiltered(self):
404 """Return unfiltered version of the repository
404 """Return unfiltered version of the repository
405
405
406 Intended to be overwritten by filtered repo."""
406 Intended to be overwritten by filtered repo."""
407 return self
407 return self
408
408
409 def filtered(self, name):
409 def filtered(self, name):
410 """Return a filtered version of a repository"""
410 """Return a filtered version of a repository"""
411 # build a new class with the mixin and the current class
411 # build a new class with the mixin and the current class
412 # (possibly subclass of the repo)
412 # (possibly subclass of the repo)
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
414 pass
414 pass
415 return proxycls(self, name)
415 return proxycls(self, name)
416
416
417 @repofilecache('bookmarks')
417 @repofilecache('bookmarks')
418 def _bookmarks(self):
418 def _bookmarks(self):
419 return bookmarks.bmstore(self)
419 return bookmarks.bmstore(self)
420
420
421 @repofilecache('bookmarks.current')
421 @repofilecache('bookmarks.current')
422 def _bookmarkcurrent(self):
422 def _bookmarkcurrent(self):
423 return bookmarks.readcurrent(self)
423 return bookmarks.readcurrent(self)
424
424
425 def bookmarkheads(self, bookmark):
425 def bookmarkheads(self, bookmark):
426 name = bookmark.split('@', 1)[0]
426 name = bookmark.split('@', 1)[0]
427 heads = []
427 heads = []
428 for mark, n in self._bookmarks.iteritems():
428 for mark, n in self._bookmarks.iteritems():
429 if mark.split('@', 1)[0] == name:
429 if mark.split('@', 1)[0] == name:
430 heads.append(n)
430 heads.append(n)
431 return heads
431 return heads
432
432
433 @storecache('phaseroots')
433 @storecache('phaseroots')
434 def _phasecache(self):
434 def _phasecache(self):
435 return phases.phasecache(self, self._phasedefaults)
435 return phases.phasecache(self, self._phasedefaults)
436
436
437 @storecache('obsstore')
437 @storecache('obsstore')
438 def obsstore(self):
438 def obsstore(self):
439 # read default format for new obsstore.
439 # read default format for new obsstore.
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 # rely on obsstore class default when possible.
441 # rely on obsstore class default when possible.
442 kwargs = {}
442 kwargs = {}
443 if defaultformat is not None:
443 if defaultformat is not None:
444 kwargs['defaultformat'] = defaultformat
444 kwargs['defaultformat'] = defaultformat
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 **kwargs)
447 **kwargs)
448 if store and readonly:
448 if store and readonly:
449 self.ui.warn(
449 self.ui.warn(
450 _('obsolete feature not enabled but %i markers found!\n')
450 _('obsolete feature not enabled but %i markers found!\n')
451 % len(list(store)))
451 % len(list(store)))
452 return store
452 return store
453
453
454 @storecache('00changelog.i')
454 @storecache('00changelog.i')
455 def changelog(self):
455 def changelog(self):
456 c = changelog.changelog(self.svfs)
456 c = changelog.changelog(self.svfs)
457 if 'HG_PENDING' in os.environ:
457 if 'HG_PENDING' in os.environ:
458 p = os.environ['HG_PENDING']
458 p = os.environ['HG_PENDING']
459 if p.startswith(self.root):
459 if p.startswith(self.root):
460 c.readpending('00changelog.i.a')
460 c.readpending('00changelog.i.a')
461 return c
461 return c
462
462
463 @storecache('00manifest.i')
463 @storecache('00manifest.i')
464 def manifest(self):
464 def manifest(self):
465 return manifest.manifest(self.svfs)
465 return manifest.manifest(self.svfs)
466
466
467 @repofilecache('dirstate')
467 @repofilecache('dirstate')
468 def dirstate(self):
468 def dirstate(self):
469 warned = [0]
469 warned = [0]
470 def validate(node):
470 def validate(node):
471 try:
471 try:
472 self.changelog.rev(node)
472 self.changelog.rev(node)
473 return node
473 return node
474 except error.LookupError:
474 except error.LookupError:
475 if not warned[0]:
475 if not warned[0]:
476 warned[0] = True
476 warned[0] = True
477 self.ui.warn(_("warning: ignoring unknown"
477 self.ui.warn(_("warning: ignoring unknown"
478 " working parent %s!\n") % short(node))
478 " working parent %s!\n") % short(node))
479 return nullid
479 return nullid
480
480
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
482
482
483 def __getitem__(self, changeid):
483 def __getitem__(self, changeid):
484 if changeid is None:
484 if changeid is None:
485 return context.workingctx(self)
485 return context.workingctx(self)
486 if isinstance(changeid, slice):
486 if isinstance(changeid, slice):
487 return [context.changectx(self, i)
487 return [context.changectx(self, i)
488 for i in xrange(*changeid.indices(len(self)))
488 for i in xrange(*changeid.indices(len(self)))
489 if i not in self.changelog.filteredrevs]
489 if i not in self.changelog.filteredrevs]
490 return context.changectx(self, changeid)
490 return context.changectx(self, changeid)
491
491
492 def __contains__(self, changeid):
492 def __contains__(self, changeid):
493 try:
493 try:
494 self[changeid]
494 self[changeid]
495 return True
495 return True
496 except error.RepoLookupError:
496 except error.RepoLookupError:
497 return False
497 return False
498
498
499 def __nonzero__(self):
499 def __nonzero__(self):
500 return True
500 return True
501
501
502 def __len__(self):
502 def __len__(self):
503 return len(self.changelog)
503 return len(self.changelog)
504
504
505 def __iter__(self):
505 def __iter__(self):
506 return iter(self.changelog)
506 return iter(self.changelog)
507
507
508 def revs(self, expr, *args):
508 def revs(self, expr, *args):
509 '''Return a list of revisions matching the given revset'''
509 '''Return a list of revisions matching the given revset'''
510 expr = revset.formatspec(expr, *args)
510 expr = revset.formatspec(expr, *args)
511 m = revset.match(None, expr)
511 m = revset.match(None, expr)
512 return m(self)
512 return m(self)
513
513
514 def set(self, expr, *args):
514 def set(self, expr, *args):
515 '''
515 '''
516 Yield a context for each matching revision, after doing arg
516 Yield a context for each matching revision, after doing arg
517 replacement via revset.formatspec
517 replacement via revset.formatspec
518 '''
518 '''
519 for r in self.revs(expr, *args):
519 for r in self.revs(expr, *args):
520 yield self[r]
520 yield self[r]
521
521
522 def url(self):
522 def url(self):
523 return 'file:' + self.root
523 return 'file:' + self.root
524
524
525 def hook(self, name, throw=False, **args):
525 def hook(self, name, throw=False, **args):
526 """Call a hook, passing this repo instance.
526 """Call a hook, passing this repo instance.
527
527
528 This a convenience method to aid invoking hooks. Extensions likely
528 This a convenience method to aid invoking hooks. Extensions likely
529 won't call this unless they have registered a custom hook or are
529 won't call this unless they have registered a custom hook or are
530 replacing code that is expected to call a hook.
530 replacing code that is expected to call a hook.
531 """
531 """
532 return hook.hook(self.ui, self, name, throw, **args)
532 return hook.hook(self.ui, self, name, throw, **args)
533
533
534 @unfilteredmethod
534 @unfilteredmethod
535 def _tag(self, names, node, message, local, user, date, extra={},
535 def _tag(self, names, node, message, local, user, date, extra={},
536 editor=False):
536 editor=False):
537 if isinstance(names, str):
537 if isinstance(names, str):
538 names = (names,)
538 names = (names,)
539
539
540 branches = self.branchmap()
540 branches = self.branchmap()
541 for name in names:
541 for name in names:
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
543 local=local)
543 local=local)
544 if name in branches:
544 if name in branches:
545 self.ui.warn(_("warning: tag %s conflicts with existing"
545 self.ui.warn(_("warning: tag %s conflicts with existing"
546 " branch name\n") % name)
546 " branch name\n") % name)
547
547
548 def writetags(fp, names, munge, prevtags):
548 def writetags(fp, names, munge, prevtags):
549 fp.seek(0, 2)
549 fp.seek(0, 2)
550 if prevtags and prevtags[-1] != '\n':
550 if prevtags and prevtags[-1] != '\n':
551 fp.write('\n')
551 fp.write('\n')
552 for name in names:
552 for name in names:
553 if munge:
553 if munge:
554 m = munge(name)
554 m = munge(name)
555 else:
555 else:
556 m = name
556 m = name
557
557
558 if (self._tagscache.tagtypes and
558 if (self._tagscache.tagtypes and
559 name in self._tagscache.tagtypes):
559 name in self._tagscache.tagtypes):
560 old = self.tags().get(name, nullid)
560 old = self.tags().get(name, nullid)
561 fp.write('%s %s\n' % (hex(old), m))
561 fp.write('%s %s\n' % (hex(old), m))
562 fp.write('%s %s\n' % (hex(node), m))
562 fp.write('%s %s\n' % (hex(node), m))
563 fp.close()
563 fp.close()
564
564
565 prevtags = ''
565 prevtags = ''
566 if local:
566 if local:
567 try:
567 try:
568 fp = self.vfs('localtags', 'r+')
568 fp = self.vfs('localtags', 'r+')
569 except IOError:
569 except IOError:
570 fp = self.vfs('localtags', 'a')
570 fp = self.vfs('localtags', 'a')
571 else:
571 else:
572 prevtags = fp.read()
572 prevtags = fp.read()
573
573
574 # local tags are stored in the current charset
574 # local tags are stored in the current charset
575 writetags(fp, names, None, prevtags)
575 writetags(fp, names, None, prevtags)
576 for name in names:
576 for name in names:
577 self.hook('tag', node=hex(node), tag=name, local=local)
577 self.hook('tag', node=hex(node), tag=name, local=local)
578 return
578 return
579
579
580 try:
580 try:
581 fp = self.wfile('.hgtags', 'rb+')
581 fp = self.wfile('.hgtags', 'rb+')
582 except IOError, e:
582 except IOError, e:
583 if e.errno != errno.ENOENT:
583 if e.errno != errno.ENOENT:
584 raise
584 raise
585 fp = self.wfile('.hgtags', 'ab')
585 fp = self.wfile('.hgtags', 'ab')
586 else:
586 else:
587 prevtags = fp.read()
587 prevtags = fp.read()
588
588
589 # committed tags are stored in UTF-8
589 # committed tags are stored in UTF-8
590 writetags(fp, names, encoding.fromlocal, prevtags)
590 writetags(fp, names, encoding.fromlocal, prevtags)
591
591
592 fp.close()
592 fp.close()
593
593
594 self.invalidatecaches()
594 self.invalidatecaches()
595
595
596 if '.hgtags' not in self.dirstate:
596 if '.hgtags' not in self.dirstate:
597 self[None].add(['.hgtags'])
597 self[None].add(['.hgtags'])
598
598
599 m = matchmod.exact(self.root, '', ['.hgtags'])
599 m = matchmod.exact(self.root, '', ['.hgtags'])
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
601 editor=editor)
601 editor=editor)
602
602
603 for name in names:
603 for name in names:
604 self.hook('tag', node=hex(node), tag=name, local=local)
604 self.hook('tag', node=hex(node), tag=name, local=local)
605
605
606 return tagnode
606 return tagnode
607
607
608 def tag(self, names, node, message, local, user, date, editor=False):
608 def tag(self, names, node, message, local, user, date, editor=False):
609 '''tag a revision with one or more symbolic names.
609 '''tag a revision with one or more symbolic names.
610
610
611 names is a list of strings or, when adding a single tag, names may be a
611 names is a list of strings or, when adding a single tag, names may be a
612 string.
612 string.
613
613
614 if local is True, the tags are stored in a per-repository file.
614 if local is True, the tags are stored in a per-repository file.
615 otherwise, they are stored in the .hgtags file, and a new
615 otherwise, they are stored in the .hgtags file, and a new
616 changeset is committed with the change.
616 changeset is committed with the change.
617
617
618 keyword arguments:
618 keyword arguments:
619
619
620 local: whether to store tags in non-version-controlled file
620 local: whether to store tags in non-version-controlled file
621 (default False)
621 (default False)
622
622
623 message: commit message to use if committing
623 message: commit message to use if committing
624
624
625 user: name of user to use if committing
625 user: name of user to use if committing
626
626
627 date: date tuple to use if committing'''
627 date: date tuple to use if committing'''
628
628
629 if not local:
629 if not local:
630 m = matchmod.exact(self.root, '', ['.hgtags'])
630 m = matchmod.exact(self.root, '', ['.hgtags'])
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
632 raise util.Abort(_('working copy of .hgtags is changed'),
632 raise util.Abort(_('working copy of .hgtags is changed'),
633 hint=_('please commit .hgtags manually'))
633 hint=_('please commit .hgtags manually'))
634
634
635 self.tags() # instantiate the cache
635 self.tags() # instantiate the cache
636 self._tag(names, node, message, local, user, date, editor=editor)
636 self._tag(names, node, message, local, user, date, editor=editor)
637
637
638 @filteredpropertycache
638 @filteredpropertycache
639 def _tagscache(self):
639 def _tagscache(self):
640 '''Returns a tagscache object that contains various tags related
640 '''Returns a tagscache object that contains various tags related
641 caches.'''
641 caches.'''
642
642
643 # This simplifies its cache management by having one decorated
643 # This simplifies its cache management by having one decorated
644 # function (this one) and the rest simply fetch things from it.
644 # function (this one) and the rest simply fetch things from it.
645 class tagscache(object):
645 class tagscache(object):
646 def __init__(self):
646 def __init__(self):
647 # These two define the set of tags for this repository. tags
647 # These two define the set of tags for this repository. tags
648 # maps tag name to node; tagtypes maps tag name to 'global' or
648 # maps tag name to node; tagtypes maps tag name to 'global' or
649 # 'local'. (Global tags are defined by .hgtags across all
649 # 'local'. (Global tags are defined by .hgtags across all
650 # heads, and local tags are defined in .hg/localtags.)
650 # heads, and local tags are defined in .hg/localtags.)
651 # They constitute the in-memory cache of tags.
651 # They constitute the in-memory cache of tags.
652 self.tags = self.tagtypes = None
652 self.tags = self.tagtypes = None
653
653
654 self.nodetagscache = self.tagslist = None
654 self.nodetagscache = self.tagslist = None
655
655
656 cache = tagscache()
656 cache = tagscache()
657 cache.tags, cache.tagtypes = self._findtags()
657 cache.tags, cache.tagtypes = self._findtags()
658
658
659 return cache
659 return cache
660
660
661 def tags(self):
661 def tags(self):
662 '''return a mapping of tag to node'''
662 '''return a mapping of tag to node'''
663 t = {}
663 t = {}
664 if self.changelog.filteredrevs:
664 if self.changelog.filteredrevs:
665 tags, tt = self._findtags()
665 tags, tt = self._findtags()
666 else:
666 else:
667 tags = self._tagscache.tags
667 tags = self._tagscache.tags
668 for k, v in tags.iteritems():
668 for k, v in tags.iteritems():
669 try:
669 try:
670 # ignore tags to unknown nodes
670 # ignore tags to unknown nodes
671 self.changelog.rev(v)
671 self.changelog.rev(v)
672 t[k] = v
672 t[k] = v
673 except (error.LookupError, ValueError):
673 except (error.LookupError, ValueError):
674 pass
674 pass
675 return t
675 return t
676
676
677 def _findtags(self):
677 def _findtags(self):
678 '''Do the hard work of finding tags. Return a pair of dicts
678 '''Do the hard work of finding tags. Return a pair of dicts
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
680 maps tag name to a string like \'global\' or \'local\'.
680 maps tag name to a string like \'global\' or \'local\'.
681 Subclasses or extensions are free to add their own tags, but
681 Subclasses or extensions are free to add their own tags, but
682 should be aware that the returned dicts will be retained for the
682 should be aware that the returned dicts will be retained for the
683 duration of the localrepo object.'''
683 duration of the localrepo object.'''
684
684
685 # XXX what tagtype should subclasses/extensions use? Currently
685 # XXX what tagtype should subclasses/extensions use? Currently
686 # mq and bookmarks add tags, but do not set the tagtype at all.
686 # mq and bookmarks add tags, but do not set the tagtype at all.
687 # Should each extension invent its own tag type? Should there
687 # Should each extension invent its own tag type? Should there
688 # be one tagtype for all such "virtual" tags? Or is the status
688 # be one tagtype for all such "virtual" tags? Or is the status
689 # quo fine?
689 # quo fine?
690
690
691 alltags = {} # map tag name to (node, hist)
691 alltags = {} # map tag name to (node, hist)
692 tagtypes = {}
692 tagtypes = {}
693
693
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
696
696
697 # Build the return dicts. Have to re-encode tag names because
697 # Build the return dicts. Have to re-encode tag names because
698 # the tags module always uses UTF-8 (in order not to lose info
698 # the tags module always uses UTF-8 (in order not to lose info
699 # writing to the cache), but the rest of Mercurial wants them in
699 # writing to the cache), but the rest of Mercurial wants them in
700 # local encoding.
700 # local encoding.
701 tags = {}
701 tags = {}
702 for (name, (node, hist)) in alltags.iteritems():
702 for (name, (node, hist)) in alltags.iteritems():
703 if node != nullid:
703 if node != nullid:
704 tags[encoding.tolocal(name)] = node
704 tags[encoding.tolocal(name)] = node
705 tags['tip'] = self.changelog.tip()
705 tags['tip'] = self.changelog.tip()
706 tagtypes = dict([(encoding.tolocal(name), value)
706 tagtypes = dict([(encoding.tolocal(name), value)
707 for (name, value) in tagtypes.iteritems()])
707 for (name, value) in tagtypes.iteritems()])
708 return (tags, tagtypes)
708 return (tags, tagtypes)
709
709
710 def tagtype(self, tagname):
710 def tagtype(self, tagname):
711 '''
711 '''
712 return the type of the given tag. result can be:
712 return the type of the given tag. result can be:
713
713
714 'local' : a local tag
714 'local' : a local tag
715 'global' : a global tag
715 'global' : a global tag
716 None : tag does not exist
716 None : tag does not exist
717 '''
717 '''
718
718
719 return self._tagscache.tagtypes.get(tagname)
719 return self._tagscache.tagtypes.get(tagname)
720
720
721 def tagslist(self):
721 def tagslist(self):
722 '''return a list of tags ordered by revision'''
722 '''return a list of tags ordered by revision'''
723 if not self._tagscache.tagslist:
723 if not self._tagscache.tagslist:
724 l = []
724 l = []
725 for t, n in self.tags().iteritems():
725 for t, n in self.tags().iteritems():
726 l.append((self.changelog.rev(n), t, n))
726 l.append((self.changelog.rev(n), t, n))
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
728
728
729 return self._tagscache.tagslist
729 return self._tagscache.tagslist
730
730
731 def nodetags(self, node):
731 def nodetags(self, node):
732 '''return the tags associated with a node'''
732 '''return the tags associated with a node'''
733 if not self._tagscache.nodetagscache:
733 if not self._tagscache.nodetagscache:
734 nodetagscache = {}
734 nodetagscache = {}
735 for t, n in self._tagscache.tags.iteritems():
735 for t, n in self._tagscache.tags.iteritems():
736 nodetagscache.setdefault(n, []).append(t)
736 nodetagscache.setdefault(n, []).append(t)
737 for tags in nodetagscache.itervalues():
737 for tags in nodetagscache.itervalues():
738 tags.sort()
738 tags.sort()
739 self._tagscache.nodetagscache = nodetagscache
739 self._tagscache.nodetagscache = nodetagscache
740 return self._tagscache.nodetagscache.get(node, [])
740 return self._tagscache.nodetagscache.get(node, [])
741
741
742 def nodebookmarks(self, node):
742 def nodebookmarks(self, node):
743 marks = []
743 marks = []
744 for bookmark, n in self._bookmarks.iteritems():
744 for bookmark, n in self._bookmarks.iteritems():
745 if n == node:
745 if n == node:
746 marks.append(bookmark)
746 marks.append(bookmark)
747 return sorted(marks)
747 return sorted(marks)
748
748
749 def branchmap(self):
749 def branchmap(self):
750 '''returns a dictionary {branch: [branchheads]} with branchheads
750 '''returns a dictionary {branch: [branchheads]} with branchheads
751 ordered by increasing revision number'''
751 ordered by increasing revision number'''
752 branchmap.updatecache(self)
752 branchmap.updatecache(self)
753 return self._branchcaches[self.filtername]
753 return self._branchcaches[self.filtername]
754
754
755 @unfilteredmethod
755 @unfilteredmethod
756 def revbranchcache(self):
756 def revbranchcache(self):
757 if not self._revbranchcache:
757 if not self._revbranchcache:
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
759 return self._revbranchcache
759 return self._revbranchcache
760
760
761 def branchtip(self, branch, ignoremissing=False):
761 def branchtip(self, branch, ignoremissing=False):
762 '''return the tip node for a given branch
762 '''return the tip node for a given branch
763
763
764 If ignoremissing is True, then this method will not raise an error.
764 If ignoremissing is True, then this method will not raise an error.
765 This is helpful for callers that only expect None for a missing branch
765 This is helpful for callers that only expect None for a missing branch
766 (e.g. namespace).
766 (e.g. namespace).
767
767
768 '''
768 '''
769 try:
769 try:
770 return self.branchmap().branchtip(branch)
770 return self.branchmap().branchtip(branch)
771 except KeyError:
771 except KeyError:
772 if not ignoremissing:
772 if not ignoremissing:
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
774 else:
774 else:
775 pass
775 pass
776
776
777 def lookup(self, key):
777 def lookup(self, key):
778 return self[key].node()
778 return self[key].node()
779
779
780 def lookupbranch(self, key, remote=None):
780 def lookupbranch(self, key, remote=None):
781 repo = remote or self
781 repo = remote or self
782 if key in repo.branchmap():
782 if key in repo.branchmap():
783 return key
783 return key
784
784
785 repo = (remote and remote.local()) and remote or self
785 repo = (remote and remote.local()) and remote or self
786 return repo[key].branch()
786 return repo[key].branch()
787
787
788 def known(self, nodes):
788 def known(self, nodes):
789 nm = self.changelog.nodemap
789 nm = self.changelog.nodemap
790 pc = self._phasecache
790 pc = self._phasecache
791 result = []
791 result = []
792 for n in nodes:
792 for n in nodes:
793 r = nm.get(n)
793 r = nm.get(n)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
795 result.append(resp)
795 result.append(resp)
796 return result
796 return result
797
797
798 def local(self):
798 def local(self):
799 return self
799 return self
800
800
801 def cancopy(self):
801 def cancopy(self):
802 # so statichttprepo's override of local() works
802 # so statichttprepo's override of local() works
803 if not self.local():
803 if not self.local():
804 return False
804 return False
805 if not self.ui.configbool('phases', 'publish', True):
805 if not self.ui.configbool('phases', 'publish', True):
806 return True
806 return True
807 # if publishing we can't copy if there is filtered content
807 # if publishing we can't copy if there is filtered content
808 return not self.filtered('visible').changelog.filteredrevs
808 return not self.filtered('visible').changelog.filteredrevs
809
809
810 def shared(self):
810 def shared(self):
811 '''the type of shared repository (None if not shared)'''
811 '''the type of shared repository (None if not shared)'''
812 if self.sharedpath != self.path:
812 if self.sharedpath != self.path:
813 return 'store'
813 return 'store'
814 return None
814 return None
815
815
816 def join(self, f, *insidef):
816 def join(self, f, *insidef):
817 return self.vfs.join(os.path.join(f, *insidef))
817 return self.vfs.join(os.path.join(f, *insidef))
818
818
819 def wjoin(self, f, *insidef):
819 def wjoin(self, f, *insidef):
820 return self.vfs.reljoin(self.root, f, *insidef)
820 return self.vfs.reljoin(self.root, f, *insidef)
821
821
822 def file(self, f):
822 def file(self, f):
823 if f[0] == '/':
823 if f[0] == '/':
824 f = f[1:]
824 f = f[1:]
825 return filelog.filelog(self.svfs, f)
825 return filelog.filelog(self.svfs, f)
826
826
827 def changectx(self, changeid):
827 def changectx(self, changeid):
828 return self[changeid]
828 return self[changeid]
829
829
830 def parents(self, changeid=None):
830 def parents(self, changeid=None):
831 '''get list of changectxs for parents of changeid'''
831 '''get list of changectxs for parents of changeid'''
832 return self[changeid].parents()
832 return self[changeid].parents()
833
833
834 def setparents(self, p1, p2=nullid):
834 def setparents(self, p1, p2=nullid):
835 self.dirstate.beginparentchange()
835 self.dirstate.beginparentchange()
836 copies = self.dirstate.setparents(p1, p2)
836 copies = self.dirstate.setparents(p1, p2)
837 pctx = self[p1]
837 pctx = self[p1]
838 if copies:
838 if copies:
839 # Adjust copy records, the dirstate cannot do it, it
839 # Adjust copy records, the dirstate cannot do it, it
840 # requires access to parents manifests. Preserve them
840 # requires access to parents manifests. Preserve them
841 # only for entries added to first parent.
841 # only for entries added to first parent.
842 for f in copies:
842 for f in copies:
843 if f not in pctx and copies[f] in pctx:
843 if f not in pctx and copies[f] in pctx:
844 self.dirstate.copy(copies[f], f)
844 self.dirstate.copy(copies[f], f)
845 if p2 == nullid:
845 if p2 == nullid:
846 for f, s in sorted(self.dirstate.copies().items()):
846 for f, s in sorted(self.dirstate.copies().items()):
847 if f not in pctx and s not in pctx:
847 if f not in pctx and s not in pctx:
848 self.dirstate.copy(None, f)
848 self.dirstate.copy(None, f)
849 self.dirstate.endparentchange()
849 self.dirstate.endparentchange()
850
850
851 def filectx(self, path, changeid=None, fileid=None):
851 def filectx(self, path, changeid=None, fileid=None):
852 """changeid can be a changeset revision, node, or tag.
852 """changeid can be a changeset revision, node, or tag.
853 fileid can be a file revision or node."""
853 fileid can be a file revision or node."""
854 return context.filectx(self, path, changeid, fileid)
854 return context.filectx(self, path, changeid, fileid)
855
855
856 def getcwd(self):
856 def getcwd(self):
857 return self.dirstate.getcwd()
857 return self.dirstate.getcwd()
858
858
859 def pathto(self, f, cwd=None):
859 def pathto(self, f, cwd=None):
860 return self.dirstate.pathto(f, cwd)
860 return self.dirstate.pathto(f, cwd)
861
861
862 def wfile(self, f, mode='r'):
862 def wfile(self, f, mode='r'):
863 return self.wvfs(f, mode)
863 return self.wvfs(f, mode)
864
864
865 def _link(self, f):
865 def _link(self, f):
866 return self.wvfs.islink(f)
866 return self.wvfs.islink(f)
867
867
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

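        # Generate a unique identifier for this transaction up front; hooks
        # receive it (exported as HG_TXNID) and can use it to correlate the
        # pretxnopen/pretxnclose/txnclose/txnabort runs of one transaction.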
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
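        # The txnclose hook is deliberately deferred through _afterlock below,
        # so it only fires once the outermost repository lock is released.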
        def txnclosehook(tr2):
            """To be run if the transaction is successful; schedules a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr

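    # The journal.* files below are snapshots taken when a transaction opens;
    # on success, aftertrans() renames them to undo.* so that a later
    # 'hg rollback' can restore the pre-transaction state.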
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

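    # undo.desc (written as journal.desc above) holds the changelog length
    # before the transaction and the transaction description, optionally
    # followed by a third detail line; _rollback parses it for its report.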
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

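    # _lock first tries a non-blocking acquisition (timeout 0); only when the
    # lock is already held and wait=True does it retry with the configurable
    # ui.timeout (600 seconds by default), warning the user meanwhile.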
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
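            # tr.writepending() flushes the delayed changelog data to a
            # pending file and returns True if it wrote anything; passing the
            # repo root lets pretxncommit hooks see the pending changes
            # (exposed to them as HG_PENDING).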
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changesets:
                # if a parent has a higher phase, the resulting phase will be
                # compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

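    # between() walks from each 'top' node towards 'bottom', recording nodes
    # at exponentially growing distances (1, 2, 4, 8, ...); the legacy
    # wire-protocol discovery uses these samples to bisect the range between
    # known and unknown changesets.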
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

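    # Worked example for between() above: given a linear chain
    # a0 <- a1 <- ... <- a16 and the pair (a16, a0), nodes are sampled at
    # first-parent distances 1, 2, 4, 8 from the top, so the resulting
    # list is [a15, a14, a12, a8]; the exponential spacing keeps the
    # discovery exchange logarithmic in the distance spanned.
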
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

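    # A minimal override sketch for an extension (names hypothetical):
    #
    #     def checkpush(self, pushop):
    #         if pushop.force:
    #             raise util.Abort('forced pushes are disabled here')
    #         super(vetorepo, self).checkpush(pushop)
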
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

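    # A hypothetical extension could register a pre-push check roughly as:
    #
    #     def checkoutgoing(repo, remote, outgoing):
    #         ...  # inspect outgoing.missing; raise util.Abort to refuse
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)
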
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible: filters above
                    # 'served' are unlikely to be fetched from a fresh clone.
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

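    # A rough sketch of the stream body consumed by stream_in() above, as
    # implied by the parsing code (not a normative protocol spec):
    #
    #   <status>\n                   0 = ok, 1 = forbidden, 2 = lock failed
    #   <total files> <total bytes>\n
    #   then, per file: "<store path>\0<size>\n" followed by <size> raw
    #   bytes of revlog data written straight into the store.
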
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

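    # Negotiation sketch for clone() above: 'stream-preferred' lets the
    # server opt in by default; a bare 'stream' capability implies a plain
    # revlogv1 store, while 'streamreqs=<fmt,...>' is checked against
    # self.supportedformats before falling back to a regular pull.
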
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

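    # Usage sketch (hedged): bookmark pushes funnel through pushkey, e.g.
    #   repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
    # where an empty 'old' means the key is being created; the method
    # returns False when a prepushkey hook aborts the update.
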
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

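    # For example, repo.listkeys('namespaces') enumerates the available
    # pushkey namespaces, and repo.listkeys('bookmarks') should map each
    # bookmark name to a hex node (hedged illustration).
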
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

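    # e.g. repo.savecommitmessage('WIP: refactor') writes the text to
    # .hg/last-message.txt and returns a user-displayable relative path,
    # which callers typically surface after a failed commit.
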
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

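# Usage sketch: the transaction machinery invokes the closure returned by
# aftertrans() once the transaction is finalized, e.g. with files like
# [(self.svfs, 'journal', 'undo')], so the journal is renamed into the
# 'undo' files later consumed by rollback.
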
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

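# e.g. undoname('store/journal.phaseroots') -> 'store/undo.phaseroots';
# the assertion guards against being handed a non-journal path.
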
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True