localrepo: reuse commit of parent filectx entries without rehashing...
Mads Kiilerich
r24394:03163826 default
@@ -1,1915 +1,1920 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

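# Illustrative sketch (not part of the original file): the decorators above
# are what the localrepository class below hangs its lazy attributes on,
# e.g.:
#
#     @repofilecache('bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# Reads, writes and deletes are all redirected to repo.unfiltered(), so every
# filtered view of a repo shares a single cached value, recomputed only when
# the backing file under .hg/ changes on disk.
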
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

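# Minimal usage sketch (not part of the original file): a localpeer is
# normally obtained through localrepository.peer() rather than constructed
# directly, and it answers modern wire-protocol calls against the 'served'
# view of the repo:
#
#     peer = repo.peer()          # fresh localpeer, never cached
#     node = peer.lookup('tip')   # resolved against repo.filtered('served')
#     peer.known([node])          # -> [True], unless the node is secret
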
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

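    # Usage sketch (not part of the original file): __init__ above serves
    # both opening and creating; the `create` flag decides whether a missing
    # .hg directory is an error or a request to initialize one:
    #
    #     repo = localrepository(ui, '/some/path')               # must exist
    #     repo = localrepository(ui, '/some/path', create=True)  # like hg init
    #
    # '/some/path' is a placeholder; callers normally go through
    # hg.repository() instead of instantiating this class themselves.
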
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.svfs.options = dict((r, 1) for r in requirements
                                 if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize

    def _writerequirements(self):
        reqfile = self.vfs("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

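    # Sketch (not part of the original file): with the default configuration
    # the loop above leaves .hg/requires looking roughly like
    #
    #     dotencode
    #     fncache
    #     revlogv1
    #     store
    #
    # one sorted entry per line; 'generaldelta' is added only when the
    # format.generaldelta option was set at creation time.
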
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

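    # Usage sketch (not part of the original file): each filtered view is a
    # dynamically built subclass wrapping the same underlying repo, so state
    # is shared while the visible revision set differs:
    #
    #     served = repo.filtered('served')   # e.g. hides secret changesets
    #     unfi = served.unfiltered()         # back to the bare repo
    #     unfi is repo.unfiltered()          # -> True
    #
    # The filter names themselves ('served', 'visible', ...) are defined by
    # the repoview module.
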
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

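    # Usage sketch (not part of the original file): revs() and set() are the
    # usual programmatic entry points to revsets; the %-placeholders are
    # expanded by revset.formatspec() before parsing:
    #
    #     for r in repo.revs('branch(%s) and not public()', 'default'):
    #         ...                        # integer revision numbers
    #     for ctx in repo.set('parents(%d)', rev):
    #         ...                        # changectx objects; `rev` is a
    #                                    # hypothetical revision number
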
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

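    # Usage sketch (not part of the original file): tagging goes through the
    # public tag() wrapper above, which refuses to run with a dirty .hgtags
    # and then delegates to _tag():
    #
    #     repo.tag('v1.0', node, 'Added tag v1.0', local=False,
    #              user=None, date=None)
    #
    # 'v1.0' and the message are placeholders; user and date fall back to
    # the usual commit defaults when None.
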
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

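    # Sketch (not part of the original file): the mapping returned by tags()
    # above is a plain {tagname: binary node} dict and always includes 'tip':
    #
    #     t = repo.tags()
    #     t['tip'] == repo.changelog.tip()   # -> True
    #     t.get('v1.0')                      # None if the tag is unknown
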
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

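    # Usage sketch (not part of the original file): branchmap() behaves like
    # {branchname: [heads]} with heads ordered by increasing revision, and
    # branchtip() resolves a single branch:
    #
    #     heads = repo.branchmap()['default']   # list of binary nodes
    #     tip = repo.branchtip('default')       # tipmost head of the branch
    #     repo.branchtip('no-such-branch', ignoremissing=True)  # -> None
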
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
            or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                msg = 'transaction with no lock\n'
                if self.ui.tracebackflag:
                    util.debugstacktrace(msg, 1)
                else:
                    self.ui.write_err(msg)
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        self._transref = weakref.ref(tr)
        return tr

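    # Illustrative sketch, not part of localrepo.py: the intended calling
    # pattern for the transaction API above, from code that already holds
    # the store lock:
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         ...          # append to revlogs and other store files
    #         tr.close()   # success: run finalizers and close hooks
    #     finally:
    #         tr.release() # rolls back unless close() succeeded
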
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

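    # undoname() (defined later in this module) maps each journal file to its
    # post-transaction backup, e.g. 'journal.dirstate' -> 'undo.dirstate';
    # those 'undo.*' files are what _rollback() below restores.
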
    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

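    # Illustrative usage, not part of localrepo.py: recover() and rollback()
    # back the 'hg recover' and 'hg rollback' commands:
    #
    #     ok = repo.recover()              # True if an interrupted
    #                                      # transaction was rolled back
    #     ret = repo.rollback(dryrun=True) # 0 on success, 1 if nothing to undo
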
    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        if (self.ui.configbool('devel', 'all')
            or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                msg = '"lock" taken before "wlock"\n'
                if self.ui.tracebackflag:
                    util.debugstacktrace(msg, 1)
                else:
                    self.ui.write_err(msg)
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

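    # Illustrative sketch, not part of localrepo.py: the lock ordering the
    # devel check above enforces. Callers needing both locks must take wlock
    # first:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         ...                      # mutate working copy and store
    #     finally:
    #         release(lock, wlock)     # 'release' is imported from lock above
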
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

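    # Illustrative sketch, not part of localrepo.py: the isinstance() fast
    # path above lets commits built from in-memory contexts reuse a parent's
    # filelog entry without rehashing when a file is unchanged. For example,
    # a hypothetical memctx filectxfn could hand back the parent's filectx:
    #
    #     def filectxfn(repo, memctx, path):
    #         return repo['.'][path]   # a context.filectx, reused as-is
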
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

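    # Illustrative sketch, not part of localrepo.py: a minimal programmatic
    # commit through the API above, assuming 'repo' has pending working
    # directory changes:
    #
    #     node = repo.commit(text='update docs', user='alice <a@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
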
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

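    # Illustrative sketch, not part of localrepo.py: inspecting working
    # directory status against '.', the working copy's parent:
    #
    #     st = repo.status(ignored=True, unknown=True)
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)
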
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

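    # Worked example (illustrative): between() walks first parents from 'top'
    # towards 'bottom' and samples the nodes at exponentially growing
    # distances 1, 2, 4, 8, ... from the top. For a linear history with
    # top=rev 10 and bottom=rev 0, the list returned for that pair contains
    # the nodes of revisions 9, 8, 6 and 2.
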
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

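    # Illustrative sketch, not part of localrepo.py: an extension could veto
    # pushes by registering a callable here (names below are hypothetical;
    # util.hooks.add() is assumed to take a source name and a function):
    #
    #     def checkoutgoing(local, remote, outgoing):
    #         if len(outgoing.missing) > 100:
    #             raise util.Abort(_('refusing to push more than 100 csets'))
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)
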
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            # the first line of the stream is a status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

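    # Callers normally reach this through hg.clone(); a direct call would
    # look roughly like the sketch below, where "peer" stands for any
    # remote peer object (hypothetical name):
    #
    #   repo.clone(peer)                    # negotiate stream vs. pull
    #   repo.clone(peer, stream=True)       # ask for a streaming clone
    #   repo.clone(peer, heads=[node])      # partial clone, forces pull
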
    def pushkey(self, namespace, key, old, new):
        try:
            # let hooks veto the operation before anything is written
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

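    # A minimal sketch (not part of the original module): pushkey is the
    # generic key/value update channel, so a bookmark update boils down to
    # something like this ('mybook' and the hex node strings are made up):
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', oldhexnode, newhexnode)
    #   # ok is False when a prepushkey hook aborts or the update fails
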
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

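    # A minimal sketch (not part of the original module): callers stash the
    # message before operations that may fail, so the user can recover it:
    #
    #   msgfn = repo.savecommitmessage(ctx.description())
    #   # later, on failure:
    #   raise util.Abort(_('commit failed'),
    #                    hint=_('commit message saved in %s') % msgfn)
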
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

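# A minimal sketch (not part of the original module): localrepository's
# transaction() passes aftertrans(renames) as the transaction's "after"
# callback, so journal files are renamed to undo files only once the
# transaction is released (argument list abbreviated):
#
#   tr = transaction.transaction(rp, self.svfs, "journal",
#                                aftertrans(renames), ...)
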
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

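# For example: undoname('.hg/store/journal') -> '.hg/store/undo', and
# undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'.
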
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True