localrepo.getbundle: drop unused 'format' argument...
Author: Martin von Zweigbergk
Revision: r24639:c79b1e69 (branch: default)
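The only functional change in this 1925-line hunk is to localpeer.getbundle (file lines 109-110): the format='HG10' parameter is dropped because the method only forwards its arguments to exchange.getbundle, which takes no such parameter, so the value was never used. A minimal usage sketch of the revised API follows; the repo value and head list are hypothetical, and bundle format negotiation rides on bundlecaps rather than the removed argument:

    # Hypothetical sketch: 'repo' is assumed to be an open localrepository.
    peer = repo.peer()  # a localpeer over the repo's 'served' view
    cg = peer.getbundle('pull',
                        heads=[repo.lookup('tip')],
                        common=[],        # nodes both sides already have
                        bundlecaps=None)  # e.g. set(['HG2Y']) requests bundle2
    # When bundlecaps contains 'HG2Y', getbundle wraps the returned stream
    # in bundle2.unbundle20 before handing it back (see file lines 113-117).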
@@ -1,1925 +1,1925 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
-                 format='HG10', **kwargs):
+                 **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    requirements.append("manifestv2")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.svfs.options = dict((r, 1) for r in requirements
                                 if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
        if usetreemanifest is not None:
            self.svfs.options['usetreemanifest'] = usetreemanifest

    def _writerequirements(self):
        reqfile = self.vfs("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

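    # Example (hedged sketch, not in the original source): wwrite() honors
    # manifest flags, so
    #
    #     repo.wwrite('script.sh', data, 'x')   # regular file, exec bit set
    #     repo.wwrite('link', 'target', 'l')    # symlink pointing at 'target'
    #
    # and an empty flags string writes a plain regular file.
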
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
            or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                msg = 'transaction with no lock\n'
                if self.ui.tracebackflag:
                    util.debugstacktrace(msg, 1)
                else:
                    self.ui.write_err(msg)
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        self._transref = weakref.ref(tr)
        return tr

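    # Usage sketch (illustrative, not part of the original source), matching
    # the pattern followed by commitctx() and stream_in() below:
    #
    #     tr = repo.transaction("my-operation")
    #     try:
    #         ...write to the store through tr...
    #         tr.close()       # commit the transaction
    #     finally:
    #         tr.release()     # rolls back unless close() succeeded
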
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

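    # Illustrative note (assumption: undoname(), defined elsewhere in this
    # module, rewrites the 'journal' prefix to 'undo'): once a transaction
    # completes, aftertrans(renames) turns e.g. 'journal.dirstate' into
    # 'undo.dirstate', which is what _rollback() below restores from.
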
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

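    # Worked example (illustrative, not in the original source): given the
    # "%d\n%s\n" format written by _writejournal() above, an undo.desc file
    # containing
    #
    #     43
    #     commit
    #
    # parses to oldlen=43 and desc='commit', so the rollback message reports
    # the tip rolled back to revision 42.
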
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

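    # Usage sketch (illustrative, not part of the original source), matching
    # the pattern in recover() above:
    #
    #     lock = repo.lock()
    #     try:
    #         ...modify the store...
    #     finally:
    #         lock.release()
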
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        if (self.ui.configbool('devel', 'all')
            or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                msg = '"lock" taken before "wlock"\n'
                if self.ui.tracebackflag:
                    util.debugstacktrace(msg, 1)
                else:
                    self.ui.write_err(msg)
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

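    # Ordering note with a sketch (illustrative, not in the original source):
    # the devel check above flags "lock" taken before "wlock", so callers
    # needing both acquire the working-directory lock first, as rollback()
    # above does:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         ...
    #     finally:
    #         release(lock, wlock)
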
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

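    # Illustrative example (not part of the original source): for a file
    # committed after 'hg copy foo bar', the new filelog revision of 'bar'
    # carries copy metadata roughly like
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-digit hex node>'}
    #
    # with fparent1 set to nullid, so readers know to consult the copy data.
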
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs after the lock is released
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

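    # Usage sketch (illustrative, not part of the original source):
    #
    #     node = repo.commit(text='fix the frobnicator', user='a@b.c')
    #
    # returns the new changeset node, or None when there is nothing to
    # commit (no changed files, same branch, and no 'close' key in extra).
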
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changeset; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

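    # Usage sketch (illustrative, not part of the original source):
    #
    #     repo.branchheads()                       # heads of the dirstate branch
    #     repo.branchheads('stable', closed=True)  # include closed heads too
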
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

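    # Worked example (illustrative, not part of the original source): while
    # walking first parents from top towards bottom, between() records a node
    # only when the step counter i equals f, and doubles f each time, so on a
    # linear chain it keeps the nodes 1, 2, 4, 8, ... steps below top. This
    # exponentially spaced sample is the kind used by the legacy discovery
    # protocol to narrow down a missing range without sending every node.
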
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
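            # Stream layout, as parsed below (summary added for clarity, not
            # part of the original source): a status line with an integer
            # response code, then "<total_files> <total_bytes>", then for
            # each file a "<name>\0<size>" header followed by <size> bytes
            # of raw store data.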
1729 l = fp.readline()
1729 l = fp.readline()
1730 try:
1730 try:
1731 resp = int(l)
1731 resp = int(l)
1732 except ValueError:
1732 except ValueError:
1733 raise error.ResponseError(
1733 raise error.ResponseError(
1734 _('unexpected response from remote server:'), l)
1734 _('unexpected response from remote server:'), l)
1735 if resp == 1:
1735 if resp == 1:
1736 raise util.Abort(_('operation forbidden by server'))
1736 raise util.Abort(_('operation forbidden by server'))
1737 elif resp == 2:
1737 elif resp == 2:
1738 raise util.Abort(_('locking the remote repository failed'))
1738 raise util.Abort(_('locking the remote repository failed'))
1739 elif resp != 0:
1739 elif resp != 0:
1740 raise util.Abort(_('the server sent an unknown error code'))
1740 raise util.Abort(_('the server sent an unknown error code'))
1741 self.ui.status(_('streaming all changes\n'))
1741 self.ui.status(_('streaming all changes\n'))
1742 l = fp.readline()
1742 l = fp.readline()
1743 try:
1743 try:
1744 total_files, total_bytes = map(int, l.split(' ', 1))
1744 total_files, total_bytes = map(int, l.split(' ', 1))
1745 except (ValueError, TypeError):
1745 except (ValueError, TypeError):
1746 raise error.ResponseError(
1746 raise error.ResponseError(
1747 _('unexpected response from remote server:'), l)
1747 _('unexpected response from remote server:'), l)
1748 self.ui.status(_('%d files to transfer, %s of data\n') %
1748 self.ui.status(_('%d files to transfer, %s of data\n') %
1749 (total_files, util.bytecount(total_bytes)))
1749 (total_files, util.bytecount(total_bytes)))
1750 handled_bytes = 0
1750 handled_bytes = 0
1751 self.ui.progress(_('clone'), 0, total=total_bytes)
1751 self.ui.progress(_('clone'), 0, total=total_bytes)
1752 start = time.time()
1752 start = time.time()
1753
1753
            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone.
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

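    # Illustrative sketch (not upstream code; paths and sizes are made up):
    # the stream parsed above looks roughly like this on the wire:
    #
    #     0\n                        status code (0 = OK)
    #     2 12544\n                  <total files> <total bytes>
    #     data/foo.i\x00247\n        <store path>\0<size>
    #     ...247 bytes of raw revlog data...
    #     00manifest.i\x0012297\n
    #     ...12297 bytes of raw revlog data...
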
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

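    # Illustrative sketch (not upstream code; the URL is made up): callers
    # can force or forbid a streaming clone explicitly:
    #
    #     other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #     repo.clone(other, stream=True)   # stream if the server allows it
    #     repo.clone(other, stream=False)  # always use a regular pull
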
    def pushkey(self, namespace, key, old, new):
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

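    # Illustrative sketch (not upstream code; the bookmark name and nodes
    # are made up): bookmark exchange is built on this pushkey mechanism:
    #
    #     repo.listkeys('bookmarks')    # -> {'stable': '<40-char hex node>'}
    #     repo.pushkey('bookmarks', 'stable', oldhexnode, newhexnode)
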
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

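# Illustrative sketch (not upstream code): the repository hands the returned
# closure to its transaction, which runs it once the transaction finishes:
#
#     after = aftertrans([(vfs, 'journal', 'undo')])
#     after()   # renames 'journal' to 'undo' when the transaction is done
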
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

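# Illustrative sketch (not upstream code):
#
#     undoname('.hg/store/journal')             # -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
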
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True