##// END OF EJS Templates
commit: no longer allow empty commit with the 'force' argument (API)...
Pierre-Yves David -
r25021:9a74b991 default
parent child Browse files
Show More
@@ -1,1970 +1,1970 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
class repofilecache(filecache):
    """A filecache descriptor that always operates on the unfiltered repo.

    All filecache usage on repo is done for logic that should be
    unfiltered, so every access is redirected through repo.unfiltered().
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35
35
class storecache(repofilecache):
    """filecache variant for files living in the store (.hg/store)."""

    def join(self, obj, fname):
        # resolve fname relative to the store, not .hg/ itself
        path = obj.sjoin(fname)
        return path
40
40
class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # accessed through a filtered view: read (and possibly
            # compute/cache) the value on the unfiltered repo instead
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
49
49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # bypass any __setattr__ override so the cached value lands
        # directly in the instance dict
        object.__setattr__(obj, self.name, value)
55
55
56
56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60
60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered version
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66
66
# wire-protocol capabilities advertised by a modern local peer
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# legacy peers additionally support changegroupsubset
legacycaps = moderncaps | set(['changegroupsubset'])
70
70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose the 'served' view so filtered changesets stay hidden
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # re-raise the original failure after flushing the output
                raise
        except error.PushRaced, exc:
            # translate a push race into a wire-friendly response error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (includes changegroupsubset)
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
    # on-disk format requirements this class knows how to handle
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # full requirement set: formats plus working-copy/layout features
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements forwarded to the store opener as revlog options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the repoview filter in effect (None means unfiltered)
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
205
205
    def _baserequirements(self, create):
        """Return the list of base requirements for a new repository.

        ``create`` is currently unused by the base implementation.
        """
        return ['revlogv1']
208
208
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at path."""
        self.requirements = set()
        # working directory vfs, rooted at the repo root
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        # .hg/ vfs
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # load per-repo configuration and the extensions it enables
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc (or unreadable): proceed with base configuration
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run hooks registered by currently-enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError, inst:
                # a missing requires file means an old-style repo; any
                # other I/O failure is a real error
                if inst.errno != errno.ENOENT:
                    raise

        # honor .hg/sharedpath when this repo shares its store
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
327
327
    def close(self):
        # flush in-memory caches (currently the rev branch cache) to disk
        self._writecaches()
330
330
331 def _writecaches(self):
331 def _writecaches(self):
332 if self._revbranchcache:
332 if self._revbranchcache:
333 self._revbranchcache.write()
333 self._revbranchcache.write()
334
334
335 def _restrictcapabilities(self, caps):
335 def _restrictcapabilities(self, caps):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 caps = set(caps)
337 caps = set(caps)
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 caps.add('bundle2=' + urllib.quote(capsblob))
339 caps.add('bundle2=' + urllib.quote(capsblob))
340 return caps
340 return caps
341
341
342 def _applyopenerreqs(self):
342 def _applyopenerreqs(self):
343 self.svfs.options = dict((r, 1) for r in self.requirements
343 self.svfs.options = dict((r, 1) for r in self.requirements
344 if r in self.openerreqs)
344 if r in self.openerreqs)
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 if chunkcachesize is not None:
346 if chunkcachesize is not None:
347 self.svfs.options['chunkcachesize'] = chunkcachesize
347 self.svfs.options['chunkcachesize'] = chunkcachesize
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
349 if maxchainlen is not None:
349 if maxchainlen is not None:
350 self.svfs.options['maxchainlen'] = maxchainlen
350 self.svfs.options['maxchainlen'] = maxchainlen
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
352 if manifestcachesize is not None:
352 if manifestcachesize is not None:
353 self.svfs.options['manifestcachesize'] = manifestcachesize
353 self.svfs.options['manifestcachesize'] = manifestcachesize
354
354
    def _writerequirements(self):
        # persist self.requirements into .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
357
357
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # walk prefixes of subpath from longest to shortest, looking for
        # the closest enclosing subrepo registered in .hgsub state
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is itself a subrepo of this repo: legal
                    return True
                else:
                    # path lies inside a subrepo; delegate the check to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
395
395
    def peer(self):
        """Return a localpeer wrapping this repository."""
        return localpeer(self) # not cached to avoid reference cycle
398
398
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        # the base class is always unfiltered
        return self
404
404
405 def filtered(self, name):
405 def filtered(self, name):
406 """Return a filtered version of a repository"""
406 """Return a filtered version of a repository"""
407 # build a new class with the mixin and the current class
407 # build a new class with the mixin and the current class
408 # (possibly subclass of the repo)
408 # (possibly subclass of the repo)
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
410 pass
410 pass
411 return proxycls(self, name)
411 return proxycls(self, name)
412
412
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark store, invalidated when .hg/bookmarks changes
        return bookmarks.bmstore(self)
416
416
    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        # active bookmark name, invalidated when .hg/bookmarks.current changes
        return bookmarks.readactive(self)
420
420
421 def bookmarkheads(self, bookmark):
421 def bookmarkheads(self, bookmark):
422 name = bookmark.split('@', 1)[0]
422 name = bookmark.split('@', 1)[0]
423 heads = []
423 heads = []
424 for mark, n in self._bookmarks.iteritems():
424 for mark, n in self._bookmarks.iteritems():
425 if mark.split('@', 1)[0] == name:
425 if mark.split('@', 1)[0] == name:
426 heads.append(n)
426 heads.append(n)
427 return heads
427 return heads
428
428
    @storecache('phaseroots')
    def _phasecache(self):
        # phase data, invalidated when .hg/store/phaseroots changes
        return phases.phasecache(self, self._phasedefaults)
432
432
    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist but the feature is disabled: warn, don't fail
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
449
449
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # a transaction is pending for this repo: also read the
            # not-yet-committed revisions from 00changelog.i.a
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
458
458
    @storecache('00manifest.i')
    def manifest(self):
        # the manifest revlog, read through the store vfs
        return manifest.manifest(self.svfs)
462
462
    @repofilecache('dirstate')
    def dirstate(self):
        # warn at most once about an unknown working directory parent
        warned = [0]
        def validate(node):
            # map an unknown dirstate parent to nullid instead of failing
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
478
478
    def __getitem__(self, changeid):
        # None means the working directory context
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # a slice of revision numbers, skipping filtered revisions
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
487
487
def __contains__(self, changeid):
    """Return True when changeid resolves to a changeset here."""
    try:
        self[changeid]
    except error.RepoLookupError:
        return False
    return True
494
494
def __nonzero__(self):
    """A repository object is always truthy, even when empty."""
    return True
497
497
def __len__(self):
    """Number of revisions in the (possibly filtered) changelog."""
    return len(self.changelog)
500
500
def __iter__(self):
    """Iterate over revision numbers, in changelog order."""
    return iter(self.changelog)
503
503
def revs(self, expr, *args):
    '''Return a list of revisions matching the given revset'''
    # Interpolate args into the revset template, then evaluate the
    # resulting expression against this (possibly filtered) repo.
    expr = revset.formatspec(expr, *args)
    m = revset.match(None, expr)
    return m(self)
509
509
def set(self, expr, *args):
    '''
    Yield a context for each matching revision, after doing arg
    replacement via revset.formatspec
    '''
    # lazily build contexts so callers can stop iterating early
    for r in self.revs(expr, *args):
        yield self[r]
517
517
def url(self):
    """Return the 'file:' URL for this local repository."""
    return ''.join(['file:', self.root])
520
520
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    This is a convenience method to aid invoking hooks. Extensions
    likely won't call this unless they have registered a custom hook
    or are replacing code that is expected to call a hook.

    When throw is true a failing hook raises instead of returning a
    status.
    """
    return hook.hook(self.ui, self, name, throw, **args)
529
529
530 @unfilteredmethod
530 @unfilteredmethod
531 def _tag(self, names, node, message, local, user, date, extra={},
531 def _tag(self, names, node, message, local, user, date, extra={},
532 editor=False):
532 editor=False):
533 if isinstance(names, str):
533 if isinstance(names, str):
534 names = (names,)
534 names = (names,)
535
535
536 branches = self.branchmap()
536 branches = self.branchmap()
537 for name in names:
537 for name in names:
538 self.hook('pretag', throw=True, node=hex(node), tag=name,
538 self.hook('pretag', throw=True, node=hex(node), tag=name,
539 local=local)
539 local=local)
540 if name in branches:
540 if name in branches:
541 self.ui.warn(_("warning: tag %s conflicts with existing"
541 self.ui.warn(_("warning: tag %s conflicts with existing"
542 " branch name\n") % name)
542 " branch name\n") % name)
543
543
544 def writetags(fp, names, munge, prevtags):
544 def writetags(fp, names, munge, prevtags):
545 fp.seek(0, 2)
545 fp.seek(0, 2)
546 if prevtags and prevtags[-1] != '\n':
546 if prevtags and prevtags[-1] != '\n':
547 fp.write('\n')
547 fp.write('\n')
548 for name in names:
548 for name in names:
549 if munge:
549 if munge:
550 m = munge(name)
550 m = munge(name)
551 else:
551 else:
552 m = name
552 m = name
553
553
554 if (self._tagscache.tagtypes and
554 if (self._tagscache.tagtypes and
555 name in self._tagscache.tagtypes):
555 name in self._tagscache.tagtypes):
556 old = self.tags().get(name, nullid)
556 old = self.tags().get(name, nullid)
557 fp.write('%s %s\n' % (hex(old), m))
557 fp.write('%s %s\n' % (hex(old), m))
558 fp.write('%s %s\n' % (hex(node), m))
558 fp.write('%s %s\n' % (hex(node), m))
559 fp.close()
559 fp.close()
560
560
561 prevtags = ''
561 prevtags = ''
562 if local:
562 if local:
563 try:
563 try:
564 fp = self.vfs('localtags', 'r+')
564 fp = self.vfs('localtags', 'r+')
565 except IOError:
565 except IOError:
566 fp = self.vfs('localtags', 'a')
566 fp = self.vfs('localtags', 'a')
567 else:
567 else:
568 prevtags = fp.read()
568 prevtags = fp.read()
569
569
570 # local tags are stored in the current charset
570 # local tags are stored in the current charset
571 writetags(fp, names, None, prevtags)
571 writetags(fp, names, None, prevtags)
572 for name in names:
572 for name in names:
573 self.hook('tag', node=hex(node), tag=name, local=local)
573 self.hook('tag', node=hex(node), tag=name, local=local)
574 return
574 return
575
575
576 try:
576 try:
577 fp = self.wfile('.hgtags', 'rb+')
577 fp = self.wfile('.hgtags', 'rb+')
578 except IOError, e:
578 except IOError, e:
579 if e.errno != errno.ENOENT:
579 if e.errno != errno.ENOENT:
580 raise
580 raise
581 fp = self.wfile('.hgtags', 'ab')
581 fp = self.wfile('.hgtags', 'ab')
582 else:
582 else:
583 prevtags = fp.read()
583 prevtags = fp.read()
584
584
585 # committed tags are stored in UTF-8
585 # committed tags are stored in UTF-8
586 writetags(fp, names, encoding.fromlocal, prevtags)
586 writetags(fp, names, encoding.fromlocal, prevtags)
587
587
588 fp.close()
588 fp.close()
589
589
590 self.invalidatecaches()
590 self.invalidatecaches()
591
591
592 if '.hgtags' not in self.dirstate:
592 if '.hgtags' not in self.dirstate:
593 self[None].add(['.hgtags'])
593 self[None].add(['.hgtags'])
594
594
595 m = matchmod.exact(self.root, '', ['.hgtags'])
595 m = matchmod.exact(self.root, '', ['.hgtags'])
596 tagnode = self.commit(message, user, date, extra=extra, match=m,
596 tagnode = self.commit(message, user, date, extra=extra, match=m,
597 editor=editor)
597 editor=editor)
598
598
599 for name in names:
599 for name in names:
600 self.hook('tag', node=hex(node), tag=name, local=local)
600 self.hook('tag', node=hex(node), tag=name, local=local)
601
601
602 return tagnode
602 return tagnode
603
603
def tag(self, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # refuse to commit over a locally modified/unknown .hgtags;
        # the user must resolve that state first
        m = matchmod.exact(self.root, '', ['.hgtags'])
        if util.any(self.status(match=m, unknown=True, ignored=True)):
            raise util.Abort(_('working copy of .hgtags is changed'),
                             hint=_('please commit .hgtags manually'))

    self.tags() # instantiate the cache
    self._tag(names, node, message, local, user, date, editor=editor)
633
633
@filteredpropertycache
def _tagscache(self):
    '''Returns a tagscache object that contains various tags related
    caches.'''

    # This simplifies its cache management by having one decorated
    # function (this one) and the rest simply fetch things from it.
    class tagscache(object):
        def __init__(self):
            # These two define the set of tags for this repository. tags
            # maps tag name to node; tagtypes maps tag name to 'global' or
            # 'local'. (Global tags are defined by .hgtags across all
            # heads, and local tags are defined in .hg/localtags.)
            # They constitute the in-memory cache of tags.
            self.tags = self.tagtypes = None

            # derived caches, built lazily by tagslist()/nodetags()
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()

    return cache
656
656
def tags(self):
    '''return a mapping of tag to node'''
    t = {}
    if self.changelog.filteredrevs:
        # a filtered view may hide changesets the cached tags were
        # computed from, so recompute instead of trusting the cache
        tags, tt = self._findtags()
    else:
        tags = self._tagscache.tags
    for k, v in tags.iteritems():
        try:
            # ignore tags to unknown nodes
            self.changelog.rev(v)
            t[k] = v
        except (error.LookupError, ValueError):
            pass
    return t
672
672
def _findtags(self):
    '''Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?

    alltags = {} # map tag name to (node, hist)
    tagtypes = {}

    tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for (name, (node, hist)) in alltags.iteritems():
        # a nullid node means the tag was deleted; drop it
        if node != nullid:
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict([(encoding.tolocal(name), value)
                     for (name, value) in tagtypes.iteritems()])
    return (tags, tagtypes)
705
705
def tagtype(self, tagname):
    """Classify a tag name.

    Returns 'local' for a tag from .hg/localtags, 'global' for a tag
    from .hgtags, or None when the tag does not exist.
    """
    return self._tagscache.tagtypes.get(tagname)
716
716
def tagslist(self):
    '''return a list of tags ordered by revision'''
    if not self._tagscache.tagslist:
        # build (rev, tag, node) triples so sorting orders by revision
        l = []
        for t, n in self.tags().iteritems():
            l.append((self.changelog.rev(n), t, n))
        self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

    return self._tagscache.tagslist
726
726
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self._tagscache.nodetagscache:
        # lazily invert the tag -> node map into node -> [tags]
        nodetagscache = {}
        for t, n in self._tagscache.tags.iteritems():
            nodetagscache.setdefault(n, []).append(t)
        for tags in nodetagscache.itervalues():
            tags.sort()
        self._tagscache.nodetagscache = nodetagscache
    return self._tagscache.nodetagscache.get(node, [])
737
737
def nodebookmarks(self, node):
    """Return the sorted list of bookmark names pointing at node."""
    marks = [bookmark for bookmark, n in self._bookmarks.iteritems()
             if n == node]
    return sorted(marks)
744
744
def branchmap(self):
    '''returns a dictionary {branch: [branchheads]} with branchheads
    ordered by increasing revision number'''
    # refresh the per-filter cache, then hand out our view's entry
    branchmap.updatecache(self)
    caches = self._branchcaches
    return caches[self.filtername]
750
750
@unfilteredmethod
def revbranchcache(self):
    """Lazily create and return the revision->branch cache."""
    cache = self._revbranchcache
    if not cache:
        cache = branchmap.revbranchcache(self.unfiltered())
        self._revbranchcache = cache
    return cache
756
756
def branchtip(self, branch, ignoremissing=False):
    '''return the tip node for a given branch

    If ignoremissing is True, then this method will not raise an error.
    This is helpful for callers that only expect None for a missing branch
    (e.g. namespace).

    '''
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        if not ignoremissing:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        else:
            # fall through and implicitly return None
            pass
772
772
def lookup(self, key):
    """Resolve key (rev, hash, tag, ...) to a binary changeset node."""
    ctx = self[key]
    return ctx.node()
775
775
def lookupbranch(self, key, remote=None):
    """Return the branch name key refers to, preferring branch names.

    If key names an existing branch (in remote when given, else
    locally), return it unchanged; otherwise resolve key as a
    revision and return that revision's branch.
    """
    repo = remote or self
    if key in repo.branchmap():
        return key

    # only use the remote for the revision lookup when it is a local
    # repository we can index into; otherwise fall back to self
    repo = (remote and remote.local()) and remote or self
    return repo[key].branch()
783
783
def known(self, nodes):
    """Return a boolean per node: present locally and not secret.

    Used by the wire protocol discovery: secret changesets must be
    advertised as unknown to peers.
    """
    nm = self.changelog.nodemap
    pc = self._phasecache
    result = []
    for n in nodes:
        r = nm.get(n)
        # r is None -> node not in our changelog
        resp = not (r is None or pc.phase(self, r) >= phases.secret)
        result.append(resp)
    return result
793
793
def local(self):
    """Return self: this repository is local (peers return None)."""
    return self
796
796
def cancopy(self):
    """Return True if this repo can be cloned by raw file copy."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    if not self.ui.configbool('phases', 'publish', True):
        # non-publishing repo: phase data copies over correctly
        return True
    # if publishing we can't copy if there is filtered content
    return not self.filtered('visible').changelog.filteredrevs
805
805
def shared(self):
    '''the type of shared repository (None if not shared)'''
    if self.sharedpath == self.path:
        return None
    return 'store'
811
811
def join(self, f, *insidef):
    """Join path components and resolve them below .hg/ via self.vfs."""
    relpath = os.path.join(f, *insidef)
    return self.vfs.join(relpath)
814
814
def wjoin(self, f, *insidef):
    """Join path components relative to the working directory root."""
    parts = (self.root, f) + insidef
    return self.vfs.reljoin(*parts)
817
817
def file(self, f):
    """Return the filelog for tracked file f (a leading '/' is stripped)."""
    name = f[1:] if f[0] == '/' else f
    return filelog.filelog(self.svfs, name)
822
822
def changectx(self, changeid):
    """Alias for repo[changeid]."""
    return self[changeid]
825
825
def parents(self, changeid=None):
    """Return the parent changectxs of changeid (default: workingdir)."""
    ctx = self[changeid]
    return ctx.parents()
829
829
def setparents(self, p1, p2=nullid):
    """Set the dirstate parents to p1/p2, fixing up copy records.

    The dirstate cannot adjust copies itself because that requires
    access to the parents' manifests.
    """
    self.dirstate.beginparentchange()
    copies = self.dirstate.setparents(p1, p2)
    pctx = self[p1]
    if copies:
        # Adjust copy records, the dirstate cannot do it, it
        # requires access to parents manifests. Preserve them
        # only for entries added to first parent.
        for f in copies:
            if f not in pctx and copies[f] in pctx:
                self.dirstate.copy(copies[f], f)
    if p2 == nullid:
        # dropping the second parent: forget copy sources that are no
        # longer reachable from the remaining parent
        for f, s in sorted(self.dirstate.copies().items()):
            if f not in pctx and s not in pctx:
                self.dirstate.copy(None, f)
    self.dirstate.endparentchange()
846
846
def filectx(self, path, changeid=None, fileid=None):
    """Return a filectx for path.

    changeid can be a changeset revision, node, or tag;
    fileid can be a file revision or node.
    """
    return context.filectx(self, path, changeid, fileid)
851
851
def getcwd(self):
    """Return the current working directory as seen by the dirstate."""
    return self.dirstate.getcwd()
854
854
def pathto(self, f, cwd=None):
    """Return f expressed relative to cwd, via the dirstate."""
    return self.dirstate.pathto(f, cwd)
857
857
def wfile(self, f, mode='r'):
    """Open working-directory file f through the working vfs."""
    return self.wvfs(f, mode)
860
860
def _link(self, f):
    """Return True when working-directory file f is a symlink."""
    return self.wvfs.islink(f)
863
863
def _loadfilter(self, filter):
    """Parse and cache the filter config for section name 'filter'.

    Returns a list of (matcher, filterfn, params) triples built from
    ui config entries such as [encode]/[decode].
    """
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                # '!' explicitly disables filtering for this pattern
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    # command names a registered in-process filter
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                # otherwise run cmd as an external shell filter
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l
    return self.filterpats[filter]
887
887
def _filter(self, filterpats, filename, data):
    """Run data through the first filter whose pattern matches filename.

    At most one filter is applied per file; unmatched data is
    returned unchanged.
    """
    for mf, fn, cmd in filterpats:
        if mf(filename):
            self.ui.debug("filtering %s through %s\n" % (filename, cmd))
            data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
            break

    return data
896
896
@unfilteredpropertycache
def _encodefilterpats(self):
    # compiled [encode] filter patterns, cached on the unfiltered repo
    return self._loadfilter('encode')
900
900
@unfilteredpropertycache
def _decodefilterpats(self):
    # compiled [decode] filter patterns, cached on the unfiltered repo
    return self._loadfilter('decode')
904
904
def adddatafilter(self, name, filter):
    """Register an in-process data filter under the given name."""
    self._datafilters[name] = filter
907
907
def wread(self, filename):
    """Read filename from the working directory, applying encode filters.

    For symlinks the link target is read rather than the file content.
    """
    if self._link(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
914
914
def wwrite(self, filename, data, flags):
    """write ``data`` into ``filename`` in the working directory

    This returns length of written (maybe decoded) data.
    """
    data = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        # 'l' flag: materialize as a symlink pointing at data
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(filename, data)
        if 'x' in flags:
            # 'x' flag: mark the file executable
            self.wvfs.setflags(filename, False, True)
    return len(data)
928
928
def wwritedata(self, filename, data):
    """Run data through the decode filters configured for filename."""
    return self._filter(self._decodefilterpats, filename, data)
931
931
def currenttransaction(self):
    """return the current transaction or None if non exists"""
    tr = self._transref() if self._transref else None

    # a dead weakref or a finished transaction counts as "none"
    if tr and tr.running():
        return tr
    return None
942
942
def transaction(self, desc, report=None):
    """Open a new transaction named desc, or nest into a running one.

    report is an optional callable used for rollback messages; it
    defaults to self.ui.warn. Raises RepoError when an abandoned
    journal is found.
    """
    if (self.ui.configbool('devel', 'all')
        or self.ui.configbool('devel', 'check-locks')):
        l = self._lockref and self._lockref()
        if l is None or not l.held:
            scmutil.develwarn(self.ui, 'transaction with no lock')
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    self.hook('pretxnopen', throw=True, txnname=desc)

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    def validate(tr):
        """will run pre-closing hooks"""
        pending = lambda: tr.writepending() and self.root or ""
        # fix: this hook argument was misspelled 'xnname'; hooks expect
        # 'txnname' (HG_TXNNAME), matching txnclose/txnabort below
        reporef().hook('pretxnclose', throw=True, pending=pending,
                       txnname=desc, **tr.hookargs)

    tr = transaction.transaction(rp, self.sopener, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate)

    trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
    tr.hookargs['TXNID'] = trid
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        def hook():
            reporef().hook('txnclose', throw=False, txnname=desc,
                           **tr2.hookargs)
        reporef()._afterlock(hook)
    tr.addfinalize('txnclose-hook', txnclosehook)
    def txnaborthook(tr2):
        """To be run if transaction is aborted
        """
        reporef().hook('txnabort', throw=False, txnname=desc,
                       **tr2.hookargs)
    tr.addabort('txnabort-hook', txnaborthook)
    self._transref = weakref.ref(tr)
    return tr
1005
1005
def _journalfiles(self):
    """Return (vfs, filename) pairs for every file backing a transaction.

    Store-level files live in svfs; working-copy metadata in vfs.
    """
    pairs = [
        (self.svfs, 'journal'),
        (self.vfs, 'journal.dirstate'),
        (self.vfs, 'journal.branch'),
        (self.vfs, 'journal.desc'),
        (self.vfs, 'journal.bookmarks'),
        (self.svfs, 'journal.phaseroots'),
    ]
    return tuple(pairs)
1013
1013
def undofiles(self):
    """Return (vfs, filename) pairs for the saved rollback copies
    (each journal file mapped through undoname())."""
    result = []
    for vfs, name in self._journalfiles():
        result.append((vfs, undoname(name)))
    return result
1016
1016
def _writejournal(self, desc):
    """Snapshot working-copy and store metadata into journal.* files.

    These copies allow a later rollback to restore the
    pre-transaction state; 'desc' records which operation opened
    the transaction.
    """
    branchdata = encoding.fromlocal(self.dirstate.branch())
    descdata = "%d\n%s\n" % (len(self), desc)
    self.vfs.write("journal.dirstate", self.vfs.tryread("dirstate"))
    self.vfs.write("journal.branch", branchdata)
    self.vfs.write("journal.desc", descdata)
    self.vfs.write("journal.bookmarks", self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots", self.svfs.tryread("phaseroots"))
1028
1028
def recover(self):
    """Roll back an interrupted transaction (a leftover 'journal' file).

    Returns True when a journal was found and rolled back, False
    otherwise.
    """
    lock = self.lock()
    try:
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        # '' targets the store, 'plain' the .hg directory
        vfsmap = {'': self.svfs,
                  'plain': self.vfs}
        transaction.rollback(self.svfs, vfsmap, "journal",
                             self.ui.warn)
        self.invalidate()
        return True
    finally:
        lock.release()
1045
1045
def rollback(self, dryrun=False, force=False):
    """Undo the most recent transaction, if 'undo' files exist.

    Returns the result of _rollback() on success, or 1 when there is
    no rollback information.  See _rollback() for the meaning of
    'dryrun' and 'force'.
    """
    wlock = lock = None
    try:
        # always acquire wlock before lock to avoid deadlock
        wlock = self.wlock()
        lock = self.lock()
        if not self.svfs.exists("undo"):
            self.ui.warn(_("no rollback information available\n"))
            return 1
        return self._rollback(dryrun, force)
    finally:
        release(lock, wlock)
1058
1058
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force):
    """Undo the last transaction using the saved 'undo.*' files.

    dryrun: only report what would be rolled back; change nothing.
    force:  proceed even when rolling back a commit that is not the
            working directory parent (which may lose data).
    Returns 0 on success (including dry runs).
    """
    ui = self.ui
    try:
        # undo.desc holds: old changelog length, transaction name,
        # and an optional detail line
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        # undo.desc missing or unreadable: we can still roll back,
        # but cannot describe what is being undone
        msg = _('rolling back unknown transaction\n')
        desc = None

    # refuse to discard a commit that is not checked out unless
    # forced, since work based on it could otherwise be lost
    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise util.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks')
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots')
    self.invalidate()

    # restore the dirstate and branch only when the previous working
    # directory parent was stripped by the rollback
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        self.vfs.rename('undo.dirstate', 'dirstate')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # any in-progress merge state refers to stripped revisions;
        # reset it to the new working directory parent
        ms = mergemod.mergestate(self)
        ms.reset(self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1128
1128
def invalidatecaches(self):
    """Drop in-memory caches derived from the changelog: tags,
    branch heads and the volatile revision sets."""
    # the propertycache result lives in the instance dict; delattr
    # can't be used on the repoview proxy, so drop it from __dict__
    self.__dict__.pop('_tagscache', None)

    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
1137
1137
def invalidatevolatilesets(self):
    """Forget cached filtered-revision sets and obsolescence caches.

    Both caches are cheap to rebuild lazily, so they are simply
    discarded whenever obsolescence data may have changed.
    """
    obsolete.clearobscaches(self)
    self.filteredrevcache.clear()
1141
1141
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different from dirstate.invalidate() in that it does not
    always reread the dirstate. Use dirstate.invalidate() if you want
    to explicitly read the dirstate again (i.e. restoring it to a
    previous known good state).'''
    if not hasunfilteredcache(self, 'dirstate'):
        # dirstate was never instantiated: nothing to drop
        return
    dirstate = self.dirstate
    for cached in dirstate._filecache:
        try:
            delattr(dirstate, cached)
        except AttributeError:
            # this cached attribute was never materialized
            pass
    delattr(self.unfiltered(), 'dirstate')
1158
1158
def invalidate(self):
    """Drop all cached store data so the next access rereads it.

    The dirstate is deliberately skipped; it is invalidated
    separately by invalidatedirstate().
    """
    unfiltered = self.unfiltered()  # all file caches are stored unfiltered
    for name in (n for n in self._filecache if n != 'dirstate'):
        try:
            delattr(unfiltered, name)
        except AttributeError:
            # the cached attribute was never materialized
            pass
    self.invalidatecaches()
    self.store.invalidatecaches()
1172
1172
def invalidateall(self):
    '''Fully invalidates both store and non-store parts, causing the
    subsequent operation to reread any outside changes.'''
    # extensions hook these two methods to invalidate their own caches
    for flush in (self.invalidate, self.invalidatedirstate):
        flush()
1179
1179
1180 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1180 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1181 try:
1181 try:
1182 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1182 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1183 except error.LockHeld, inst:
1183 except error.LockHeld, inst:
1184 if not wait:
1184 if not wait:
1185 raise
1185 raise
1186 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1186 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1187 (desc, inst.locker))
1187 (desc, inst.locker))
1188 # default to 600 seconds timeout
1188 # default to 600 seconds timeout
1189 l = lockmod.lock(vfs, lockname,
1189 l = lockmod.lock(vfs, lockname,
1190 int(self.ui.config("ui", "timeout", "600")),
1190 int(self.ui.config("ui", "timeout", "600")),
1191 releasefn, desc=desc)
1191 releasefn, desc=desc)
1192 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1192 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1193 if acquirefn:
1193 if acquirefn:
1194 acquirefn()
1194 acquirefn()
1195 return l
1195 return l
1196
1196
def _afterlock(self, callback):
    """Schedule 'callback' to run once the repository is fully unlocked.

    The currently-held locks are searched outermost first (wlock is
    higher level than 'lock'); when one is held the callback is
    queued on its release list, otherwise it runs immediately.
    """
    for lockref in (self._wlockref, self._lockref):
        heldlock = lockref and lockref()
        if heldlock and heldlock.held:
            heldlock.postrelease.append(callback)
            return
    # no lock is currently held: nothing to wait for
    callback()
1209
1209
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)

    Re-entrant: if this process already holds the lock, its recursion
    count is bumped and the same lock object is returned.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquire
    'wlock' first to avoid a dead-lock hazard.'''
    existing = self._lockref and self._lockref()
    if existing is not None and existing.held:
        existing.lock()  # increment recursion count
        return existing

    def unlock():
        # on final release, refresh file cache entries so stale stat
        # data is not reused (dirstate is handled by wlock's unlock)
        for name, entry in self._filecache.items():
            if name == 'dirstate' or name not in self.__dict__:
                continue
            entry.refresh()

    newlock = self._lock(self.svfs, "lock", wait, unlock,
                         self.invalidate,
                         _('repository %s') % self.origroot)
    self._lockref = weakref.ref(newlock)
    return newlock
1232
1232
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Re-entrant within one process.  Use this before modifying files
    in .hg.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquire
    'wlock' first to avoid a dead-lock hazard.'''
    existing = self._wlockref and self._wlockref()
    if existing is not None and existing.held:
        existing.lock()  # increment recursion count
        return existing

    # We do not need to check for non-waiting lock acquisition.  Such
    # acquisition would not cause dead-lock as it would just fail.
    if wait and (self.ui.configbool('devel', 'all')
                 or self.ui.configbool('devel', 'check-locks')):
        innerlock = self._lockref and self._lockref()
        if innerlock is not None and innerlock.held:
            scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

    def unlock():
        if self.dirstate.pendingparentchange():
            # a parent change is in flight: drop in-memory state
            self.dirstate.invalidate()
        else:
            self.dirstate.write()

        self._filecache['dirstate'].refresh()

    newlock = self._lock(self.vfs, "wlock", wait, unlock,
                         self.invalidatedirstate,
                         _('working directory of %s') % self.origroot)
    self._wlockref = weakref.ref(newlock)
    return newlock
1267
1267
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx:       file context to commit
    manifest1:  manifest of the changeset's first parent
    manifest2:  manifest of the second parent (empty unless merging)
    linkrev:    revision the new filelog entry will link to
    tr:         the active transaction
    changelist: list of changed files; fname is appended when a new
                filelog revision is created or only flags changed

    Returns the filelog node to record in the new manifest.
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    # if the context already carries a filelog node matching one of
    # the parents, the existing filelog entry can be reused as-is
    if isinstance(fctx, context.filectx):
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
1353
1353
1354 @unfilteredmethod
1354 @unfilteredmethod
1355 def commit(self, text="", user=None, date=None, match=None, force=False,
1355 def commit(self, text="", user=None, date=None, match=None, force=False,
1356 editor=False, extra={}):
1356 editor=False, extra={}):
1357 """Add a new revision to current repository.
1357 """Add a new revision to current repository.
1358
1358
1359 Revision information is gathered from the working directory,
1359 Revision information is gathered from the working directory,
1360 match can be used to filter the committed files. If editor is
1360 match can be used to filter the committed files. If editor is
1361 supplied, it is called to get a commit message.
1361 supplied, it is called to get a commit message.
1362 """
1362 """
1363
1363
1364 def fail(f, msg):
1364 def fail(f, msg):
1365 raise util.Abort('%s: %s' % (f, msg))
1365 raise util.Abort('%s: %s' % (f, msg))
1366
1366
1367 if not match:
1367 if not match:
1368 match = matchmod.always(self.root, '')
1368 match = matchmod.always(self.root, '')
1369
1369
1370 if not force:
1370 if not force:
1371 vdirs = []
1371 vdirs = []
1372 match.explicitdir = vdirs.append
1372 match.explicitdir = vdirs.append
1373 match.bad = fail
1373 match.bad = fail
1374
1374
1375 wlock = self.wlock()
1375 wlock = self.wlock()
1376 try:
1376 try:
1377 wctx = self[None]
1377 wctx = self[None]
1378 merge = len(wctx.parents()) > 1
1378 merge = len(wctx.parents()) > 1
1379
1379
1380 if not force and merge and not match.always():
1380 if not force and merge and not match.always():
1381 raise util.Abort(_('cannot partially commit a merge '
1381 raise util.Abort(_('cannot partially commit a merge '
1382 '(do not specify files or patterns)'))
1382 '(do not specify files or patterns)'))
1383
1383
1384 status = self.status(match=match, clean=force)
1384 status = self.status(match=match, clean=force)
1385 if force:
1385 if force:
1386 status.modified.extend(status.clean) # mq may commit clean files
1386 status.modified.extend(status.clean) # mq may commit clean files
1387
1387
1388 # check subrepos
1388 # check subrepos
1389 subs = []
1389 subs = []
1390 commitsubs = set()
1390 commitsubs = set()
1391 newstate = wctx.substate.copy()
1391 newstate = wctx.substate.copy()
1392 # only manage subrepos and .hgsubstate if .hgsub is present
1392 # only manage subrepos and .hgsubstate if .hgsub is present
1393 if '.hgsub' in wctx:
1393 if '.hgsub' in wctx:
1394 # we'll decide whether to track this ourselves, thanks
1394 # we'll decide whether to track this ourselves, thanks
1395 for c in status.modified, status.added, status.removed:
1395 for c in status.modified, status.added, status.removed:
1396 if '.hgsubstate' in c:
1396 if '.hgsubstate' in c:
1397 c.remove('.hgsubstate')
1397 c.remove('.hgsubstate')
1398
1398
1399 # compare current state to last committed state
1399 # compare current state to last committed state
1400 # build new substate based on last committed state
1400 # build new substate based on last committed state
1401 oldstate = wctx.p1().substate
1401 oldstate = wctx.p1().substate
1402 for s in sorted(newstate.keys()):
1402 for s in sorted(newstate.keys()):
1403 if not match(s):
1403 if not match(s):
1404 # ignore working copy, use old state if present
1404 # ignore working copy, use old state if present
1405 if s in oldstate:
1405 if s in oldstate:
1406 newstate[s] = oldstate[s]
1406 newstate[s] = oldstate[s]
1407 continue
1407 continue
1408 if not force:
1408 if not force:
1409 raise util.Abort(
1409 raise util.Abort(
1410 _("commit with new subrepo %s excluded") % s)
1410 _("commit with new subrepo %s excluded") % s)
1411 dirtyreason = wctx.sub(s).dirtyreason(True)
1411 dirtyreason = wctx.sub(s).dirtyreason(True)
1412 if dirtyreason:
1412 if dirtyreason:
1413 if not self.ui.configbool('ui', 'commitsubrepos'):
1413 if not self.ui.configbool('ui', 'commitsubrepos'):
1414 raise util.Abort(dirtyreason,
1414 raise util.Abort(dirtyreason,
1415 hint=_("use --subrepos for recursive commit"))
1415 hint=_("use --subrepos for recursive commit"))
1416 subs.append(s)
1416 subs.append(s)
1417 commitsubs.add(s)
1417 commitsubs.add(s)
1418 else:
1418 else:
1419 bs = wctx.sub(s).basestate()
1419 bs = wctx.sub(s).basestate()
1420 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1420 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1421 if oldstate.get(s, (None, None, None))[1] != bs:
1421 if oldstate.get(s, (None, None, None))[1] != bs:
1422 subs.append(s)
1422 subs.append(s)
1423
1423
1424 # check for removed subrepos
1424 # check for removed subrepos
1425 for p in wctx.parents():
1425 for p in wctx.parents():
1426 r = [s for s in p.substate if s not in newstate]
1426 r = [s for s in p.substate if s not in newstate]
1427 subs += [s for s in r if match(s)]
1427 subs += [s for s in r if match(s)]
1428 if subs:
1428 if subs:
1429 if (not match('.hgsub') and
1429 if (not match('.hgsub') and
1430 '.hgsub' in (wctx.modified() + wctx.added())):
1430 '.hgsub' in (wctx.modified() + wctx.added())):
1431 raise util.Abort(
1431 raise util.Abort(
1432 _("can't commit subrepos without .hgsub"))
1432 _("can't commit subrepos without .hgsub"))
1433 status.modified.insert(0, '.hgsubstate')
1433 status.modified.insert(0, '.hgsubstate')
1434
1434
1435 elif '.hgsub' in status.removed:
1435 elif '.hgsub' in status.removed:
1436 # clean up .hgsubstate when .hgsub is removed
1436 # clean up .hgsubstate when .hgsub is removed
1437 if ('.hgsubstate' in wctx and
1437 if ('.hgsubstate' in wctx and
1438 '.hgsubstate' not in (status.modified + status.added +
1438 '.hgsubstate' not in (status.modified + status.added +
1439 status.removed)):
1439 status.removed)):
1440 status.removed.insert(0, '.hgsubstate')
1440 status.removed.insert(0, '.hgsubstate')
1441
1441
1442 # make sure all explicit patterns are matched
1442 # make sure all explicit patterns are matched
1443 if not force and match.files():
1443 if not force and match.files():
1444 matched = set(status.modified + status.added + status.removed)
1444 matched = set(status.modified + status.added + status.removed)
1445
1445
1446 for f in match.files():
1446 for f in match.files():
1447 f = self.dirstate.normalize(f)
1447 f = self.dirstate.normalize(f)
1448 if f == '.' or f in matched or f in wctx.substate:
1448 if f == '.' or f in matched or f in wctx.substate:
1449 continue
1449 continue
1450 if f in status.deleted:
1450 if f in status.deleted:
1451 fail(f, _('file not found!'))
1451 fail(f, _('file not found!'))
1452 if f in vdirs: # visited directory
1452 if f in vdirs: # visited directory
1453 d = f + '/'
1453 d = f + '/'
1454 for mf in matched:
1454 for mf in matched:
1455 if mf.startswith(d):
1455 if mf.startswith(d):
1456 break
1456 break
1457 else:
1457 else:
1458 fail(f, _("no match under directory!"))
1458 fail(f, _("no match under directory!"))
1459 elif f not in self.dirstate:
1459 elif f not in self.dirstate:
1460 fail(f, _("file not tracked!"))
1460 fail(f, _("file not tracked!"))
1461
1461
1462 cctx = context.workingcommitctx(self, status,
1462 cctx = context.workingcommitctx(self, status,
1463 text, user, date, extra)
1463 text, user, date, extra)
1464
1464
1465 allowemptycommit = (wctx.branch() != wctx.p1().branch() or force
1465 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1466 or extra.get('close') or merge or cctx.files()
1466 or extra.get('close') or merge or cctx.files()
1467 or self.ui.configbool('ui', 'allowemptycommit'))
1467 or self.ui.configbool('ui', 'allowemptycommit'))
1468 if not allowemptycommit:
1468 if not allowemptycommit:
1469 return None
1469 return None
1470
1470
1471 if merge and cctx.deleted():
1471 if merge and cctx.deleted():
1472 raise util.Abort(_("cannot commit merge with missing files"))
1472 raise util.Abort(_("cannot commit merge with missing files"))
1473
1473
1474 ms = mergemod.mergestate(self)
1474 ms = mergemod.mergestate(self)
1475 for f in status.modified:
1475 for f in status.modified:
1476 if f in ms and ms[f] == 'u':
1476 if f in ms and ms[f] == 'u':
1477 raise util.Abort(_('unresolved merge conflicts '
1477 raise util.Abort(_('unresolved merge conflicts '
1478 '(see "hg help resolve")'))
1478 '(see "hg help resolve")'))
1479
1479
1480 if editor:
1480 if editor:
1481 cctx._text = editor(self, cctx, subs)
1481 cctx._text = editor(self, cctx, subs)
1482 edited = (text != cctx._text)
1482 edited = (text != cctx._text)
1483
1483
1484 # Save commit message in case this transaction gets rolled back
1484 # Save commit message in case this transaction gets rolled back
1485 # (e.g. by a pretxncommit hook). Leave the content alone on
1485 # (e.g. by a pretxncommit hook). Leave the content alone on
1486 # the assumption that the user will use the same editor again.
1486 # the assumption that the user will use the same editor again.
1487 msgfn = self.savecommitmessage(cctx._text)
1487 msgfn = self.savecommitmessage(cctx._text)
1488
1488
1489 # commit subs and write new state
1489 # commit subs and write new state
1490 if subs:
1490 if subs:
1491 for s in sorted(commitsubs):
1491 for s in sorted(commitsubs):
1492 sub = wctx.sub(s)
1492 sub = wctx.sub(s)
1493 self.ui.status(_('committing subrepository %s\n') %
1493 self.ui.status(_('committing subrepository %s\n') %
1494 subrepo.subrelpath(sub))
1494 subrepo.subrelpath(sub))
1495 sr = sub.commit(cctx._text, user, date)
1495 sr = sub.commit(cctx._text, user, date)
1496 newstate[s] = (newstate[s][0], sr)
1496 newstate[s] = (newstate[s][0], sr)
1497 subrepo.writestate(self, newstate)
1497 subrepo.writestate(self, newstate)
1498
1498
1499 p1, p2 = self.dirstate.parents()
1499 p1, p2 = self.dirstate.parents()
1500 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1500 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1501 try:
1501 try:
1502 self.hook("precommit", throw=True, parent1=hookp1,
1502 self.hook("precommit", throw=True, parent1=hookp1,
1503 parent2=hookp2)
1503 parent2=hookp2)
1504 ret = self.commitctx(cctx, True)
1504 ret = self.commitctx(cctx, True)
1505 except: # re-raises
1505 except: # re-raises
1506 if edited:
1506 if edited:
1507 self.ui.write(
1507 self.ui.write(
1508 _('note: commit message saved in %s\n') % msgfn)
1508 _('note: commit message saved in %s\n') % msgfn)
1509 raise
1509 raise
1510
1510
1511 # update bookmarks, dirstate and mergestate
1511 # update bookmarks, dirstate and mergestate
1512 bookmarks.update(self, [p1, p2], ret)
1512 bookmarks.update(self, [p1, p2], ret)
1513 cctx.markcommitted(ret)
1513 cctx.markcommitted(ret)
1514 ms.reset()
1514 ms.reset()
1515 finally:
1515 finally:
1516 wlock.release()
1516 wlock.release()
1517
1517
1518 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1518 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1519 # hack for command that use a temporary commit (eg: histedit)
1519 # hack for command that use a temporary commit (eg: histedit)
1520 # temporary commit got stripped before hook release
1520 # temporary commit got stripped before hook release
1521 if self.changelog.hasnode(ret):
1521 if self.changelog.hasnode(ret):
1522 self.hook("commit", node=node, parent1=parent1,
1522 self.hook("commit", node=node, parent1=parent1,
1523 parent2=parent2)
1523 parent2=parent2)
1524 self._afterlock(commithook)
1524 self._afterlock(commithook)
1525 return ret
1525 return ret
1526
1526
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Runs under the repository lock inside a single "commit"
        transaction and returns the node id of the new changeset.
        When ``error`` is true, IOErrors raised while checking in a
        file are fatal even if they only signal a missing file.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # the proxy avoids handing the revlogs a hard reference to the
            # transaction, so it can be released/collected cleanly
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                # start from the first parent's manifest and apply this
                # commit's adds/removes on top of it
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                # all filelog entries written below will point back at the
                # changelog revision being created (the current tip + 1)
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # a None filectx means the file is actually gone
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a missing file (ENOENT) is tolerated unless the
                        # caller asked for strict error handling
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                # only record as removed the files actually present in a
                # parent manifest; drop them from the new manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lets hooks see the pending changes on disk before the
            # transaction is closed
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                # a no-op when tr.close() succeeded; otherwise rolls back
                tr.release()
            lock.release()
1613
1613
1614 @unfilteredmethod
1614 @unfilteredmethod
1615 def destroying(self):
1615 def destroying(self):
1616 '''Inform the repository that nodes are about to be destroyed.
1616 '''Inform the repository that nodes are about to be destroyed.
1617 Intended for use by strip and rollback, so there's a common
1617 Intended for use by strip and rollback, so there's a common
1618 place for anything that has to be done before destroying history.
1618 place for anything that has to be done before destroying history.
1619
1619
1620 This is mostly useful for saving state that is in memory and waiting
1620 This is mostly useful for saving state that is in memory and waiting
1621 to be flushed when the current lock is released. Because a call to
1621 to be flushed when the current lock is released. Because a call to
1622 destroyed is imminent, the repo will be invalidated causing those
1622 destroyed is imminent, the repo will be invalidated causing those
1623 changes to stay in memory (waiting for the next unlock), or vanish
1623 changes to stay in memory (waiting for the next unlock), or vanish
1624 completely.
1624 completely.
1625 '''
1625 '''
1626 # When using the same lock to commit and strip, the phasecache is left
1626 # When using the same lock to commit and strip, the phasecache is left
1627 # dirty after committing. Then when we strip, the repo is invalidated,
1627 # dirty after committing. Then when we strip, the repo is invalidated,
1628 # causing those changes to disappear.
1628 # causing those changes to disappear.
1629 if '_phasecache' in vars(self):
1629 if '_phasecache' in vars(self):
1630 self._phasecache.write()
1630 self._phasecache.write()
1631
1631
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1665
1665
1666 def walk(self, match, node=None):
1666 def walk(self, match, node=None):
1667 '''
1667 '''
1668 walk recursively through the directory tree or a given
1668 walk recursively through the directory tree or a given
1669 changeset, finding all files matched by the match
1669 changeset, finding all files matched by the match
1670 function
1670 function
1671 '''
1671 '''
1672 return self[node].walk(match)
1672 return self[node].walk(match)
1673
1673
1674 def status(self, node1='.', node2=None, match=None,
1674 def status(self, node1='.', node2=None, match=None,
1675 ignored=False, clean=False, unknown=False,
1675 ignored=False, clean=False, unknown=False,
1676 listsubrepos=False):
1676 listsubrepos=False):
1677 '''a convenience method that calls node1.status(node2)'''
1677 '''a convenience method that calls node1.status(node2)'''
1678 return self[node1].status(node2, match, ignored, clean, unknown,
1678 return self[node1].status(node2, match, ignored, clean, unknown,
1679 listsubrepos)
1679 listsubrepos)
1680
1680
1681 def heads(self, start=None):
1681 def heads(self, start=None):
1682 heads = self.changelog.heads(start)
1682 heads = self.changelog.heads(start)
1683 # sort the output in rev descending order
1683 # sort the output in rev descending order
1684 return sorted(heads, key=self.changelog.rev, reverse=True)
1684 return sorted(heads, key=self.changelog.rev, reverse=True)
1685
1685
1686 def branchheads(self, branch=None, start=None, closed=False):
1686 def branchheads(self, branch=None, start=None, closed=False):
1687 '''return a (possibly filtered) list of heads for the given branch
1687 '''return a (possibly filtered) list of heads for the given branch
1688
1688
1689 Heads are returned in topological order, from newest to oldest.
1689 Heads are returned in topological order, from newest to oldest.
1690 If branch is None, use the dirstate branch.
1690 If branch is None, use the dirstate branch.
1691 If start is not None, return only heads reachable from start.
1691 If start is not None, return only heads reachable from start.
1692 If closed is True, return heads that are marked as closed as well.
1692 If closed is True, return heads that are marked as closed as well.
1693 '''
1693 '''
1694 if branch is None:
1694 if branch is None:
1695 branch = self[None].branch()
1695 branch = self[None].branch()
1696 branches = self.branchmap()
1696 branches = self.branchmap()
1697 if branch not in branches:
1697 if branch not in branches:
1698 return []
1698 return []
1699 # the cache returns heads ordered lowest to highest
1699 # the cache returns heads ordered lowest to highest
1700 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1700 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1701 if start is not None:
1701 if start is not None:
1702 # filter out the heads that cannot be reached from startrev
1702 # filter out the heads that cannot be reached from startrev
1703 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1703 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1704 bheads = [h for h in bheads if h in fbheads]
1704 bheads = [h for h in bheads if h in fbheads]
1705 return bheads
1705 return bheads
1706
1706
1707 def branches(self, nodes):
1707 def branches(self, nodes):
1708 if not nodes:
1708 if not nodes:
1709 nodes = [self.changelog.tip()]
1709 nodes = [self.changelog.tip()]
1710 b = []
1710 b = []
1711 for n in nodes:
1711 for n in nodes:
1712 t = n
1712 t = n
1713 while True:
1713 while True:
1714 p = self.changelog.parents(n)
1714 p = self.changelog.parents(n)
1715 if p[1] != nullid or p[0] == nullid:
1715 if p[1] != nullid or p[0] == nullid:
1716 b.append((t, n, p[0], p[1]))
1716 b.append((t, n, p[0], p[1]))
1717 break
1717 break
1718 n = p[0]
1718 n = p[0]
1719 return b
1719 return b
1720
1720
1721 def between(self, pairs):
1721 def between(self, pairs):
1722 r = []
1722 r = []
1723
1723
1724 for top, bottom in pairs:
1724 for top, bottom in pairs:
1725 n, l, i = top, [], 0
1725 n, l, i = top, [], 0
1726 f = 1
1726 f = 1
1727
1727
1728 while n != bottom and n != nullid:
1728 while n != bottom and n != nullid:
1729 p = self.changelog.parents(n)[0]
1729 p = self.changelog.parents(n)[0]
1730 if i == f:
1730 if i == f:
1731 l.append(n)
1731 l.append(n)
1732 f = f * 2
1732 f = f * 2
1733 n = p
1733 n = p
1734 i += 1
1734 i += 1
1735
1735
1736 r.append(l)
1736 r.append(l)
1737
1737
1738 return r
1738 return r
1739
1739
1740 def checkpush(self, pushop):
1740 def checkpush(self, pushop):
1741 """Extensions can override this function if additional checks have
1741 """Extensions can override this function if additional checks have
1742 to be performed before pushing, or call it if they override push
1742 to be performed before pushing, or call it if they override push
1743 command.
1743 command.
1744 """
1744 """
1745 pass
1745 pass
1746
1746
1747 @unfilteredpropertycache
1747 @unfilteredpropertycache
1748 def prepushoutgoinghooks(self):
1748 def prepushoutgoinghooks(self):
1749 """Return util.hooks consists of "(repo, remote, outgoing)"
1749 """Return util.hooks consists of "(repo, remote, outgoing)"
1750 functions, which are called before pushing changesets.
1750 functions, which are called before pushing changesets.
1751 """
1751 """
1752 return util.hooks()
1752 return util.hooks()
1753
1753
    def stream_in(self, remote, remotereqs):
        """Populate this repository by streaming raw store files from
        *remote* (used by clone for uncompressed/streaming clones).

        Wire format read here: one status-code line (0 = ok, 1 =
        forbidden, 2 = remote lock failed), one "<filecount> <bytecount>"
        line, then for each file a header line "<name>\\0<size>" followed
        by exactly <size> bytes of file data.

        *remotereqs* is the set of format requirements advertised by the
        remote; it replaces our format-related requirements on success.
        Returns len(self.heads()) + 1 (a changegroup-style result count).
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    # copy exactly `size` bytes of payload, updating the
                    # progress bar as we go
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against a zero/negative clock delta before dividing
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related remote requirements
            # requirements from the streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if rbranchmap:
                # seed our branch caches from the remote branchmap saved
                # above, collecting which heads are closed
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                            for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1864
1864
1865 def clone(self, remote, heads=[], stream=None):
1865 def clone(self, remote, heads=[], stream=None):
1866 '''clone remote repository.
1866 '''clone remote repository.
1867
1867
1868 keyword arguments:
1868 keyword arguments:
1869 heads: list of revs to clone (forces use of pull)
1869 heads: list of revs to clone (forces use of pull)
1870 stream: use streaming clone if possible'''
1870 stream: use streaming clone if possible'''
1871
1871
1872 # now, all clients that can request uncompressed clones can
1872 # now, all clients that can request uncompressed clones can
1873 # read repo formats supported by all servers that can serve
1873 # read repo formats supported by all servers that can serve
1874 # them.
1874 # them.
1875
1875
1876 # if revlog format changes, client will have to check version
1876 # if revlog format changes, client will have to check version
1877 # and format flags on "stream" capability, and use
1877 # and format flags on "stream" capability, and use
1878 # uncompressed only if compatible.
1878 # uncompressed only if compatible.
1879
1879
1880 if stream is None:
1880 if stream is None:
1881 # if the server explicitly prefers to stream (for fast LANs)
1881 # if the server explicitly prefers to stream (for fast LANs)
1882 stream = remote.capable('stream-preferred')
1882 stream = remote.capable('stream-preferred')
1883
1883
1884 if stream and not heads:
1884 if stream and not heads:
1885 # 'stream' means remote revlog format is revlogv1 only
1885 # 'stream' means remote revlog format is revlogv1 only
1886 if remote.capable('stream'):
1886 if remote.capable('stream'):
1887 self.stream_in(remote, set(('revlogv1',)))
1887 self.stream_in(remote, set(('revlogv1',)))
1888 else:
1888 else:
1889 # otherwise, 'streamreqs' contains the remote revlog format
1889 # otherwise, 'streamreqs' contains the remote revlog format
1890 streamreqs = remote.capable('streamreqs')
1890 streamreqs = remote.capable('streamreqs')
1891 if streamreqs:
1891 if streamreqs:
1892 streamreqs = set(streamreqs.split(','))
1892 streamreqs = set(streamreqs.split(','))
1893 # if we support it, stream in and adjust our requirements
1893 # if we support it, stream in and adjust our requirements
1894 if not streamreqs - self.supportedformats:
1894 if not streamreqs - self.supportedformats:
1895 self.stream_in(remote, streamreqs)
1895 self.stream_in(remote, streamreqs)
1896
1896
1897 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1897 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1898 try:
1898 try:
1899 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1899 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1900 ret = exchange.pull(self, remote, heads).cgresult
1900 ret = exchange.pull(self, remote, heads).cgresult
1901 finally:
1901 finally:
1902 self.ui.restoreconfig(quiet)
1902 self.ui.restoreconfig(quiet)
1903 return ret
1903 return ret
1904
1904
    def pushkey(self, namespace, key, old, new):
        """Move *key* from value *old* to *new* in *namespace*.

        The 'prepushkey' hook runs first; if it aborts, the error is
        reported and False is returned.  Otherwise the pushkey is
        performed and the 'pushkey' hook is scheduled to fire once the
        current lock is released.  Returns the pushkey.push result.
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # inside a transaction: inherit its hook arguments and let
                # hooks see the pending (unfinalized) changes
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        # defer the notification hook until the lock is released
        self._afterlock(runhook)
        return ret
1930
1930
1931 def listkeys(self, namespace):
1931 def listkeys(self, namespace):
1932 self.hook('prelistkeys', throw=True, namespace=namespace)
1932 self.hook('prelistkeys', throw=True, namespace=namespace)
1933 self.ui.debug('listing keys for "%s"\n' % namespace)
1933 self.ui.debug('listing keys for "%s"\n' % namespace)
1934 values = pushkey.list(self, namespace)
1934 values = pushkey.list(self, namespace)
1935 self.hook('listkeys', namespace=namespace, values=values)
1935 self.hook('listkeys', namespace=namespace, values=values)
1936 return values
1936 return values
1937
1937
1938 def debugwireargs(self, one, two, three=None, four=None, five=None):
1938 def debugwireargs(self, one, two, three=None, four=None, five=None):
1939 '''used to test argument passing over the wire'''
1939 '''used to test argument passing over the wire'''
1940 return "%s %s %s %s %s" % (one, two, three, four, five)
1940 return "%s %s %s %s %s" % (one, two, three, four, five)
1941
1941
1942 def savecommitmessage(self, text):
1942 def savecommitmessage(self, text):
1943 fp = self.vfs('last-message.txt', 'wb')
1943 fp = self.vfs('last-message.txt', 'wb')
1944 try:
1944 try:
1945 fp.write(text)
1945 fp.write(text)
1946 finally:
1946 finally:
1947 fp.close()
1947 fp.close()
1948 return self.pathto(fp.name[len(self.root) + 1:])
1948 return self.pathto(fp.name[len(self.root) + 1:])
1949
1949
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (vfs, src, dest)
    renames, ignoring renames whose source does not exist yet."""
    pending = [tuple(t) for t in files]
    def dorenames():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return dorenames
1960
1960
def undoname(fn):
    """Map a journal file path to the matching undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' prefix is rewritten
    undofile = name.replace('journal', 'undo', 1)
    return os.path.join(directory, undofile)
1965
1965
def instance(ui, path, create):
    """Open (or create, when *create* is true) the local repository at
    *path*, which may be a file:// URL or a filesystem path."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1968
1968
def islocal(path):
    """A localrepo path is always local, whatever *path* contains."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now