##// END OF EJS Templates
localrepo: remove unneeded unpacking of r...
Sean Farley -
r21479:e18ef2e1 default
parent child Browse files
Show More
@@ -1,1851 +1,1851 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 propertycache = util.propertycache
21 propertycache = util.propertycache
22 filecache = scmutil.filecache
22 filecache = scmutil.filecache
23
23
class repofilecache(filecache):
    """A filecache that always operates on the unfiltered repository.

    All filecache usage on a repo is done for logic that should be
    unfiltered, so every descriptor access is redirected to
    repo.unfiltered() before delegating to the base class.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
34
34
class storecache(repofilecache):
    """A repofilecache for files that live in the store (.hg/store).

    Only the path-joining policy differs: file names are resolved
    relative to the store rather than to .hg/ itself.
    """

    def join(self, obj, fname):
        # Resolve fname inside the store directory of the repository.
        return obj.sjoin(fname)
39
39
class unfilteredpropertycache(propertycache):
    """A propertycache that applies to the unfiltered repo only.

    When accessed through a filtered view, the value is computed and
    cached on the unfiltered repository and simply looked up from there.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # Already unfiltered: compute and cache normally.
            return super(unfilteredpropertycache, self).__get__(unfi)
        # Filtered view: delegate to the attribute on the unfiltered repo
        # (triggering this descriptor there if not yet cached).
        return getattr(unfi, self.name)
48
48
class filteredpropertycache(propertycache):
    """A propertycache that must take repo filtering into account.

    The cached value is stored on the exact (possibly filtered) object it
    was computed for, so each filtered view keeps its own value.
    """

    def cachevalue(self, obj, value):
        # Bypass any __setattr__ overrides so the cache lands directly in
        # the instance dict of this specific view.
        object.__setattr__(obj, self.name, value)
54
54
55
55
def hasunfilteredcache(repo, name):
    """Report whether the unfiltered repo already caches a value for <name>."""
    unfi = repo.unfiltered()
    return name in vars(unfi)
59
59
def unfilteredmethod(orig):
    """Decorator: always run the wrapped method on the unfiltered repo."""
    def inner(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
65
65
# Wire-protocol capabilities advertised by modern local peers.
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
# Legacy peers additionally understand the old changegroupsubset call.
legacycaps = moderncaps.union(set(['changegroupsubset']))
69
69
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # A local peer always serves the 'served' filtered view of the repo.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make
            # the wire level function happier. We need to build a proper
            # object from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the
                # API is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced as exc:
            # Surface a push race to the caller as a regular response error.
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
154
154
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests
    with restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
173
173
class localrepository(object):

    # Repository format features that affect on-disk layout.
    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # Requirements that the store opener must know about.
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # Return a fresh copy so callers may append without mutating the
        # class-level default.  `create` is currently unused.
        return self.requirements[:]
191
191
192 def __init__(self, baseui, path=None, create=False):
192 def __init__(self, baseui, path=None, create=False):
193 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
194 self.wopener = self.wvfs
194 self.wopener = self.wvfs
195 self.root = self.wvfs.base
195 self.root = self.wvfs.base
196 self.path = self.wvfs.join(".hg")
196 self.path = self.wvfs.join(".hg")
197 self.origroot = path
197 self.origroot = path
198 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 self.auditor = pathutil.pathauditor(self.root, self._checknested)
199 self.vfs = scmutil.vfs(self.path)
199 self.vfs = scmutil.vfs(self.path)
200 self.opener = self.vfs
200 self.opener = self.vfs
201 self.baseui = baseui
201 self.baseui = baseui
202 self.ui = baseui.copy()
202 self.ui = baseui.copy()
203 self.ui.copy = baseui.copy # prevent copying repo configuration
203 self.ui.copy = baseui.copy # prevent copying repo configuration
204 # A list of callback to shape the phase if no data were found.
204 # A list of callback to shape the phase if no data were found.
205 # Callback are in the form: func(repo, roots) --> processed root.
205 # Callback are in the form: func(repo, roots) --> processed root.
206 # This list it to be filled by extension during repo setup
206 # This list it to be filled by extension during repo setup
207 self._phasedefaults = []
207 self._phasedefaults = []
208 try:
208 try:
209 self.ui.readconfig(self.join("hgrc"), self.root)
209 self.ui.readconfig(self.join("hgrc"), self.root)
210 extensions.loadall(self.ui)
210 extensions.loadall(self.ui)
211 except IOError:
211 except IOError:
212 pass
212 pass
213
213
214 if self.featuresetupfuncs:
214 if self.featuresetupfuncs:
215 self.supported = set(self._basesupported) # use private copy
215 self.supported = set(self._basesupported) # use private copy
216 extmods = set(m.__name__ for n, m
216 extmods = set(m.__name__ for n, m
217 in extensions.extensions(self.ui))
217 in extensions.extensions(self.ui))
218 for setupfunc in self.featuresetupfuncs:
218 for setupfunc in self.featuresetupfuncs:
219 if setupfunc.__module__ in extmods:
219 if setupfunc.__module__ in extmods:
220 setupfunc(self.ui, self.supported)
220 setupfunc(self.ui, self.supported)
221 else:
221 else:
222 self.supported = self._basesupported
222 self.supported = self._basesupported
223
223
224 if not self.vfs.isdir():
224 if not self.vfs.isdir():
225 if create:
225 if create:
226 if not self.wvfs.exists():
226 if not self.wvfs.exists():
227 self.wvfs.makedirs()
227 self.wvfs.makedirs()
228 self.vfs.makedir(notindexed=True)
228 self.vfs.makedir(notindexed=True)
229 requirements = self._baserequirements(create)
229 requirements = self._baserequirements(create)
230 if self.ui.configbool('format', 'usestore', True):
230 if self.ui.configbool('format', 'usestore', True):
231 self.vfs.mkdir("store")
231 self.vfs.mkdir("store")
232 requirements.append("store")
232 requirements.append("store")
233 if self.ui.configbool('format', 'usefncache', True):
233 if self.ui.configbool('format', 'usefncache', True):
234 requirements.append("fncache")
234 requirements.append("fncache")
235 if self.ui.configbool('format', 'dotencode', True):
235 if self.ui.configbool('format', 'dotencode', True):
236 requirements.append('dotencode')
236 requirements.append('dotencode')
237 # create an invalid changelog
237 # create an invalid changelog
238 self.vfs.append(
238 self.vfs.append(
239 "00changelog.i",
239 "00changelog.i",
240 '\0\0\0\2' # represents revlogv2
240 '\0\0\0\2' # represents revlogv2
241 ' dummy changelog to prevent using the old repo layout'
241 ' dummy changelog to prevent using the old repo layout'
242 )
242 )
243 if self.ui.configbool('format', 'generaldelta', False):
243 if self.ui.configbool('format', 'generaldelta', False):
244 requirements.append("generaldelta")
244 requirements.append("generaldelta")
245 requirements = set(requirements)
245 requirements = set(requirements)
246 else:
246 else:
247 raise error.RepoError(_("repository %s not found") % path)
247 raise error.RepoError(_("repository %s not found") % path)
248 elif create:
248 elif create:
249 raise error.RepoError(_("repository %s already exists") % path)
249 raise error.RepoError(_("repository %s already exists") % path)
250 else:
250 else:
251 try:
251 try:
252 requirements = scmutil.readrequires(self.vfs, self.supported)
252 requirements = scmutil.readrequires(self.vfs, self.supported)
253 except IOError, inst:
253 except IOError, inst:
254 if inst.errno != errno.ENOENT:
254 if inst.errno != errno.ENOENT:
255 raise
255 raise
256 requirements = set()
256 requirements = set()
257
257
258 self.sharedpath = self.path
258 self.sharedpath = self.path
259 try:
259 try:
260 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
261 realpath=True)
261 realpath=True)
262 s = vfs.base
262 s = vfs.base
263 if not vfs.exists():
263 if not vfs.exists():
264 raise error.RepoError(
264 raise error.RepoError(
265 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 _('.hg/sharedpath points to nonexistent directory %s') % s)
266 self.sharedpath = s
266 self.sharedpath = s
267 except IOError, inst:
267 except IOError, inst:
268 if inst.errno != errno.ENOENT:
268 if inst.errno != errno.ENOENT:
269 raise
269 raise
270
270
271 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
272 self.spath = self.store.path
272 self.spath = self.store.path
273 self.svfs = self.store.vfs
273 self.svfs = self.store.vfs
274 self.sopener = self.svfs
274 self.sopener = self.svfs
275 self.sjoin = self.store.join
275 self.sjoin = self.store.join
276 self.vfs.createmode = self.store.createmode
276 self.vfs.createmode = self.store.createmode
277 self._applyrequirements(requirements)
277 self._applyrequirements(requirements)
278 if create:
278 if create:
279 self._writerequirements()
279 self._writerequirements()
280
280
281
281
282 self._branchcaches = {}
282 self._branchcaches = {}
283 self.filterpats = {}
283 self.filterpats = {}
284 self._datafilters = {}
284 self._datafilters = {}
285 self._transref = self._lockref = self._wlockref = None
285 self._transref = self._lockref = self._wlockref = None
286
286
287 # A cache for various files under .hg/ that tracks file changes,
287 # A cache for various files under .hg/ that tracks file changes,
288 # (used by the filecache decorator)
288 # (used by the filecache decorator)
289 #
289 #
290 # Maps a property name to its util.filecacheentry
290 # Maps a property name to its util.filecacheentry
291 self._filecache = {}
291 self._filecache = {}
292
292
293 # hold sets of revision to be filtered
293 # hold sets of revision to be filtered
294 # should be cleared when something might have changed the filter value:
294 # should be cleared when something might have changed the filter value:
295 # - new changesets,
295 # - new changesets,
296 # - phase change,
296 # - phase change,
297 # - new obsolescence marker,
297 # - new obsolescence marker,
298 # - working directory parent change,
298 # - working directory parent change,
299 # - bookmark changes
299 # - bookmark changes
300 self.filteredrevcache = {}
300 self.filteredrevcache = {}
301
301
302 def close(self):
302 def close(self):
303 pass
303 pass
304
304
305 def _restrictcapabilities(self, caps):
305 def _restrictcapabilities(self, caps):
306 # bundle2 is not ready for prime time, drop it unless explicitly
306 # bundle2 is not ready for prime time, drop it unless explicitly
307 # required by the tests (or some brave tester)
307 # required by the tests (or some brave tester)
308 if self.ui.configbool('experimental', 'bundle2-exp', False):
308 if self.ui.configbool('experimental', 'bundle2-exp', False):
309 caps = set(caps)
309 caps = set(caps)
310 capsblob = bundle2.encodecaps(self.bundle2caps)
310 capsblob = bundle2.encodecaps(self.bundle2caps)
311 caps.add('bundle2-exp=' + urllib.quote(capsblob))
311 caps.add('bundle2-exp=' + urllib.quote(capsblob))
312 return caps
312 return caps
313
313
314 def _applyrequirements(self, requirements):
314 def _applyrequirements(self, requirements):
315 self.requirements = requirements
315 self.requirements = requirements
316 self.sopener.options = dict((r, 1) for r in requirements
316 self.sopener.options = dict((r, 1) for r in requirements
317 if r in self.openerreqs)
317 if r in self.openerreqs)
318 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
318 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
319 if chunkcachesize is not None:
319 if chunkcachesize is not None:
320 self.sopener.options['chunkcachesize'] = chunkcachesize
320 self.sopener.options['chunkcachesize'] = chunkcachesize
321
321
322 def _writerequirements(self):
322 def _writerequirements(self):
323 reqfile = self.opener("requires", "w")
323 reqfile = self.opener("requires", "w")
324 for r in sorted(self.requirements):
324 for r in sorted(self.requirements):
325 reqfile.write("%s\n" % r)
325 reqfile.write("%s\n" % r)
326 reqfile.close()
326 reqfile.close()
327
327
328 def _checknested(self, path):
328 def _checknested(self, path):
329 """Determine if path is a legal nested repository."""
329 """Determine if path is a legal nested repository."""
330 if not path.startswith(self.root):
330 if not path.startswith(self.root):
331 return False
331 return False
332 subpath = path[len(self.root) + 1:]
332 subpath = path[len(self.root) + 1:]
333 normsubpath = util.pconvert(subpath)
333 normsubpath = util.pconvert(subpath)
334
334
335 # XXX: Checking against the current working copy is wrong in
335 # XXX: Checking against the current working copy is wrong in
336 # the sense that it can reject things like
336 # the sense that it can reject things like
337 #
337 #
338 # $ hg cat -r 10 sub/x.txt
338 # $ hg cat -r 10 sub/x.txt
339 #
339 #
340 # if sub/ is no longer a subrepository in the working copy
340 # if sub/ is no longer a subrepository in the working copy
341 # parent revision.
341 # parent revision.
342 #
342 #
343 # However, it can of course also allow things that would have
343 # However, it can of course also allow things that would have
344 # been rejected before, such as the above cat command if sub/
344 # been rejected before, such as the above cat command if sub/
345 # is a subrepository now, but was a normal directory before.
345 # is a subrepository now, but was a normal directory before.
346 # The old path auditor would have rejected by mistake since it
346 # The old path auditor would have rejected by mistake since it
347 # panics when it sees sub/.hg/.
347 # panics when it sees sub/.hg/.
348 #
348 #
349 # All in all, checking against the working copy seems sensible
349 # All in all, checking against the working copy seems sensible
350 # since we want to prevent access to nested repositories on
350 # since we want to prevent access to nested repositories on
351 # the filesystem *now*.
351 # the filesystem *now*.
352 ctx = self[None]
352 ctx = self[None]
353 parts = util.splitpath(subpath)
353 parts = util.splitpath(subpath)
354 while parts:
354 while parts:
355 prefix = '/'.join(parts)
355 prefix = '/'.join(parts)
356 if prefix in ctx.substate:
356 if prefix in ctx.substate:
357 if prefix == normsubpath:
357 if prefix == normsubpath:
358 return True
358 return True
359 else:
359 else:
360 sub = ctx.sub(prefix)
360 sub = ctx.sub(prefix)
361 return sub.checknested(subpath[len(prefix) + 1:])
361 return sub.checknested(subpath[len(prefix) + 1:])
362 else:
362 else:
363 parts.pop()
363 parts.pop()
364 return False
364 return False
365
365
366 def peer(self):
366 def peer(self):
367 return localpeer(self) # not cached to avoid reference cycle
367 return localpeer(self) # not cached to avoid reference cycle
368
368
369 def unfiltered(self):
369 def unfiltered(self):
370 """Return unfiltered version of the repository
370 """Return unfiltered version of the repository
371
371
372 Intended to be overwritten by filtered repo."""
372 Intended to be overwritten by filtered repo."""
373 return self
373 return self
374
374
375 def filtered(self, name):
375 def filtered(self, name):
376 """Return a filtered version of a repository"""
376 """Return a filtered version of a repository"""
377 # build a new class with the mixin and the current class
377 # build a new class with the mixin and the current class
378 # (possibly subclass of the repo)
378 # (possibly subclass of the repo)
379 class proxycls(repoview.repoview, self.unfiltered().__class__):
379 class proxycls(repoview.repoview, self.unfiltered().__class__):
380 pass
380 pass
381 return proxycls(self, name)
381 return proxycls(self, name)
382
382
383 @repofilecache('bookmarks')
383 @repofilecache('bookmarks')
384 def _bookmarks(self):
384 def _bookmarks(self):
385 return bookmarks.bmstore(self)
385 return bookmarks.bmstore(self)
386
386
387 @repofilecache('bookmarks.current')
387 @repofilecache('bookmarks.current')
388 def _bookmarkcurrent(self):
388 def _bookmarkcurrent(self):
389 return bookmarks.readcurrent(self)
389 return bookmarks.readcurrent(self)
390
390
391 def bookmarkheads(self, bookmark):
391 def bookmarkheads(self, bookmark):
392 name = bookmark.split('@', 1)[0]
392 name = bookmark.split('@', 1)[0]
393 heads = []
393 heads = []
394 for mark, n in self._bookmarks.iteritems():
394 for mark, n in self._bookmarks.iteritems():
395 if mark.split('@', 1)[0] == name:
395 if mark.split('@', 1)[0] == name:
396 heads.append(n)
396 heads.append(n)
397 return heads
397 return heads
398
398
399 @storecache('phaseroots')
399 @storecache('phaseroots')
400 def _phasecache(self):
400 def _phasecache(self):
401 return phases.phasecache(self, self._phasedefaults)
401 return phases.phasecache(self, self._phasedefaults)
402
402
403 @storecache('obsstore')
403 @storecache('obsstore')
404 def obsstore(self):
404 def obsstore(self):
405 store = obsolete.obsstore(self.sopener)
405 store = obsolete.obsstore(self.sopener)
406 if store and not obsolete._enabled:
406 if store and not obsolete._enabled:
407 # message is rare enough to not be translated
407 # message is rare enough to not be translated
408 msg = 'obsolete feature not enabled but %i markers found!\n'
408 msg = 'obsolete feature not enabled but %i markers found!\n'
409 self.ui.warn(msg % len(list(store)))
409 self.ui.warn(msg % len(list(store)))
410 return store
410 return store
411
411
412 @storecache('00changelog.i')
412 @storecache('00changelog.i')
413 def changelog(self):
413 def changelog(self):
414 c = changelog.changelog(self.sopener)
414 c = changelog.changelog(self.sopener)
415 if 'HG_PENDING' in os.environ:
415 if 'HG_PENDING' in os.environ:
416 p = os.environ['HG_PENDING']
416 p = os.environ['HG_PENDING']
417 if p.startswith(self.root):
417 if p.startswith(self.root):
418 c.readpending('00changelog.i.a')
418 c.readpending('00changelog.i.a')
419 return c
419 return c
420
420
421 @storecache('00manifest.i')
421 @storecache('00manifest.i')
422 def manifest(self):
422 def manifest(self):
423 return manifest.manifest(self.sopener)
423 return manifest.manifest(self.sopener)
424
424
425 @repofilecache('dirstate')
425 @repofilecache('dirstate')
426 def dirstate(self):
426 def dirstate(self):
427 warned = [0]
427 warned = [0]
428 def validate(node):
428 def validate(node):
429 try:
429 try:
430 self.changelog.rev(node)
430 self.changelog.rev(node)
431 return node
431 return node
432 except error.LookupError:
432 except error.LookupError:
433 if not warned[0]:
433 if not warned[0]:
434 warned[0] = True
434 warned[0] = True
435 self.ui.warn(_("warning: ignoring unknown"
435 self.ui.warn(_("warning: ignoring unknown"
436 " working parent %s!\n") % short(node))
436 " working parent %s!\n") % short(node))
437 return nullid
437 return nullid
438
438
439 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
439 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
440
440
441 def __getitem__(self, changeid):
441 def __getitem__(self, changeid):
442 if changeid is None:
442 if changeid is None:
443 return context.workingctx(self)
443 return context.workingctx(self)
444 return context.changectx(self, changeid)
444 return context.changectx(self, changeid)
445
445
446 def __contains__(self, changeid):
446 def __contains__(self, changeid):
447 try:
447 try:
448 return bool(self.lookup(changeid))
448 return bool(self.lookup(changeid))
449 except error.RepoLookupError:
449 except error.RepoLookupError:
450 return False
450 return False
451
451
452 def __nonzero__(self):
452 def __nonzero__(self):
453 return True
453 return True
454
454
455 def __len__(self):
455 def __len__(self):
456 return len(self.changelog)
456 return len(self.changelog)
457
457
458 def __iter__(self):
458 def __iter__(self):
459 return iter(self.changelog)
459 return iter(self.changelog)
460
460
461 def revs(self, expr, *args):
461 def revs(self, expr, *args):
462 '''Return a list of revisions matching the given revset'''
462 '''Return a list of revisions matching the given revset'''
463 expr = revset.formatspec(expr, *args)
463 expr = revset.formatspec(expr, *args)
464 m = revset.match(None, expr)
464 m = revset.match(None, expr)
465 return m(self, revset.spanset(self))
465 return m(self, revset.spanset(self))
466
466
467 def set(self, expr, *args):
467 def set(self, expr, *args):
468 '''
468 '''
469 Yield a context for each matching revision, after doing arg
469 Yield a context for each matching revision, after doing arg
470 replacement via revset.formatspec
470 replacement via revset.formatspec
471 '''
471 '''
472 for r in self.revs(expr, *args):
472 for r in self.revs(expr, *args):
473 yield self[r]
473 yield self[r]
474
474
475 def url(self):
475 def url(self):
476 return 'file:' + self.root
476 return 'file:' + self.root
477
477
478 def hook(self, name, throw=False, **args):
478 def hook(self, name, throw=False, **args):
479 return hook.hook(self.ui, self, name, throw, **args)
479 return hook.hook(self.ui, self, name, throw, **args)
480
480
481 @unfilteredmethod
481 @unfilteredmethod
482 def _tag(self, names, node, message, local, user, date, extra={},
482 def _tag(self, names, node, message, local, user, date, extra={},
483 editor=False):
483 editor=False):
484 if isinstance(names, str):
484 if isinstance(names, str):
485 names = (names,)
485 names = (names,)
486
486
487 branches = self.branchmap()
487 branches = self.branchmap()
488 for name in names:
488 for name in names:
489 self.hook('pretag', throw=True, node=hex(node), tag=name,
489 self.hook('pretag', throw=True, node=hex(node), tag=name,
490 local=local)
490 local=local)
491 if name in branches:
491 if name in branches:
492 self.ui.warn(_("warning: tag %s conflicts with existing"
492 self.ui.warn(_("warning: tag %s conflicts with existing"
493 " branch name\n") % name)
493 " branch name\n") % name)
494
494
495 def writetags(fp, names, munge, prevtags):
495 def writetags(fp, names, munge, prevtags):
496 fp.seek(0, 2)
496 fp.seek(0, 2)
497 if prevtags and prevtags[-1] != '\n':
497 if prevtags and prevtags[-1] != '\n':
498 fp.write('\n')
498 fp.write('\n')
499 for name in names:
499 for name in names:
500 m = munge and munge(name) or name
500 m = munge and munge(name) or name
501 if (self._tagscache.tagtypes and
501 if (self._tagscache.tagtypes and
502 name in self._tagscache.tagtypes):
502 name in self._tagscache.tagtypes):
503 old = self.tags().get(name, nullid)
503 old = self.tags().get(name, nullid)
504 fp.write('%s %s\n' % (hex(old), m))
504 fp.write('%s %s\n' % (hex(old), m))
505 fp.write('%s %s\n' % (hex(node), m))
505 fp.write('%s %s\n' % (hex(node), m))
506 fp.close()
506 fp.close()
507
507
508 prevtags = ''
508 prevtags = ''
509 if local:
509 if local:
510 try:
510 try:
511 fp = self.opener('localtags', 'r+')
511 fp = self.opener('localtags', 'r+')
512 except IOError:
512 except IOError:
513 fp = self.opener('localtags', 'a')
513 fp = self.opener('localtags', 'a')
514 else:
514 else:
515 prevtags = fp.read()
515 prevtags = fp.read()
516
516
517 # local tags are stored in the current charset
517 # local tags are stored in the current charset
518 writetags(fp, names, None, prevtags)
518 writetags(fp, names, None, prevtags)
519 for name in names:
519 for name in names:
520 self.hook('tag', node=hex(node), tag=name, local=local)
520 self.hook('tag', node=hex(node), tag=name, local=local)
521 return
521 return
522
522
523 try:
523 try:
524 fp = self.wfile('.hgtags', 'rb+')
524 fp = self.wfile('.hgtags', 'rb+')
525 except IOError, e:
525 except IOError, e:
526 if e.errno != errno.ENOENT:
526 if e.errno != errno.ENOENT:
527 raise
527 raise
528 fp = self.wfile('.hgtags', 'ab')
528 fp = self.wfile('.hgtags', 'ab')
529 else:
529 else:
530 prevtags = fp.read()
530 prevtags = fp.read()
531
531
532 # committed tags are stored in UTF-8
532 # committed tags are stored in UTF-8
533 writetags(fp, names, encoding.fromlocal, prevtags)
533 writetags(fp, names, encoding.fromlocal, prevtags)
534
534
535 fp.close()
535 fp.close()
536
536
537 self.invalidatecaches()
537 self.invalidatecaches()
538
538
539 if '.hgtags' not in self.dirstate:
539 if '.hgtags' not in self.dirstate:
540 self[None].add(['.hgtags'])
540 self[None].add(['.hgtags'])
541
541
542 m = matchmod.exact(self.root, '', ['.hgtags'])
542 m = matchmod.exact(self.root, '', ['.hgtags'])
543 tagnode = self.commit(message, user, date, extra=extra, match=m,
543 tagnode = self.commit(message, user, date, extra=extra, match=m,
544 editor=editor)
544 editor=editor)
545
545
546 for name in names:
546 for name in names:
547 self.hook('tag', node=hex(node), tag=name, local=local)
547 self.hook('tag', node=hex(node), tag=name, local=local)
548
548
549 return tagnode
549 return tagnode
550
550
551 def tag(self, names, node, message, local, user, date, editor=False):
551 def tag(self, names, node, message, local, user, date, editor=False):
552 '''tag a revision with one or more symbolic names.
552 '''tag a revision with one or more symbolic names.
553
553
554 names is a list of strings or, when adding a single tag, names may be a
554 names is a list of strings or, when adding a single tag, names may be a
555 string.
555 string.
556
556
557 if local is True, the tags are stored in a per-repository file.
557 if local is True, the tags are stored in a per-repository file.
558 otherwise, they are stored in the .hgtags file, and a new
558 otherwise, they are stored in the .hgtags file, and a new
559 changeset is committed with the change.
559 changeset is committed with the change.
560
560
561 keyword arguments:
561 keyword arguments:
562
562
563 local: whether to store tags in non-version-controlled file
563 local: whether to store tags in non-version-controlled file
564 (default False)
564 (default False)
565
565
566 message: commit message to use if committing
566 message: commit message to use if committing
567
567
568 user: name of user to use if committing
568 user: name of user to use if committing
569
569
570 date: date tuple to use if committing'''
570 date: date tuple to use if committing'''
571
571
572 if not local:
572 if not local:
573 for x in self.status()[:5]:
573 for x in self.status()[:5]:
574 if '.hgtags' in x:
574 if '.hgtags' in x:
575 raise util.Abort(_('working copy of .hgtags is changed '
575 raise util.Abort(_('working copy of .hgtags is changed '
576 '(please commit .hgtags manually)'))
576 '(please commit .hgtags manually)'))
577
577
578 self.tags() # instantiate the cache
578 self.tags() # instantiate the cache
579 self._tag(names, node, message, local, user, date, editor=editor)
579 self._tag(names, node, message, local, user, date, editor=editor)
580
580
581 @filteredpropertycache
581 @filteredpropertycache
582 def _tagscache(self):
582 def _tagscache(self):
583 '''Returns a tagscache object that contains various tags related
583 '''Returns a tagscache object that contains various tags related
584 caches.'''
584 caches.'''
585
585
586 # This simplifies its cache management by having one decorated
586 # This simplifies its cache management by having one decorated
587 # function (this one) and the rest simply fetch things from it.
587 # function (this one) and the rest simply fetch things from it.
588 class tagscache(object):
588 class tagscache(object):
589 def __init__(self):
589 def __init__(self):
590 # These two define the set of tags for this repository. tags
590 # These two define the set of tags for this repository. tags
591 # maps tag name to node; tagtypes maps tag name to 'global' or
591 # maps tag name to node; tagtypes maps tag name to 'global' or
592 # 'local'. (Global tags are defined by .hgtags across all
592 # 'local'. (Global tags are defined by .hgtags across all
593 # heads, and local tags are defined in .hg/localtags.)
593 # heads, and local tags are defined in .hg/localtags.)
594 # They constitute the in-memory cache of tags.
594 # They constitute the in-memory cache of tags.
595 self.tags = self.tagtypes = None
595 self.tags = self.tagtypes = None
596
596
597 self.nodetagscache = self.tagslist = None
597 self.nodetagscache = self.tagslist = None
598
598
599 cache = tagscache()
599 cache = tagscache()
600 cache.tags, cache.tagtypes = self._findtags()
600 cache.tags, cache.tagtypes = self._findtags()
601
601
602 return cache
602 return cache
603
603
604 def tags(self):
604 def tags(self):
605 '''return a mapping of tag to node'''
605 '''return a mapping of tag to node'''
606 t = {}
606 t = {}
607 if self.changelog.filteredrevs:
607 if self.changelog.filteredrevs:
608 tags, tt = self._findtags()
608 tags, tt = self._findtags()
609 else:
609 else:
610 tags = self._tagscache.tags
610 tags = self._tagscache.tags
611 for k, v in tags.iteritems():
611 for k, v in tags.iteritems():
612 try:
612 try:
613 # ignore tags to unknown nodes
613 # ignore tags to unknown nodes
614 self.changelog.rev(v)
614 self.changelog.rev(v)
615 t[k] = v
615 t[k] = v
616 except (error.LookupError, ValueError):
616 except (error.LookupError, ValueError):
617 pass
617 pass
618 return t
618 return t
619
619
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        # NOTE(review): local tags are read after global ones — presumably
        # so .hg/localtags entries win; confirm against tagsmod.
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                # nullid entries mark deleted tags; drop them
                tags[encoding.tolocal(name)] = node
        # 'tip' is synthesized, never stored in .hgtags
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
652
652
653 def tagtype(self, tagname):
653 def tagtype(self, tagname):
654 '''
654 '''
655 return the type of the given tag. result can be:
655 return the type of the given tag. result can be:
656
656
657 'local' : a local tag
657 'local' : a local tag
658 'global' : a global tag
658 'global' : a global tag
659 None : tag does not exist
659 None : tag does not exist
660 '''
660 '''
661
661
662 return self._tagscache.tagtypes.get(tagname)
662 return self._tagscache.tagtypes.get(tagname)
663
663
664 def tagslist(self):
664 def tagslist(self):
665 '''return a list of tags ordered by revision'''
665 '''return a list of tags ordered by revision'''
666 if not self._tagscache.tagslist:
666 if not self._tagscache.tagslist:
667 l = []
667 l = []
668 for t, n in self.tags().iteritems():
668 for t, n in self.tags().iteritems():
669 r = self.changelog.rev(n)
669 r = self.changelog.rev(n)
670 l.append((r, t, n))
670 l.append((r, t, n))
671 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
671 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
672
672
673 return self._tagscache.tagslist
673 return self._tagscache.tagslist
674
674
675 def nodetags(self, node):
675 def nodetags(self, node):
676 '''return the tags associated with a node'''
676 '''return the tags associated with a node'''
677 if not self._tagscache.nodetagscache:
677 if not self._tagscache.nodetagscache:
678 nodetagscache = {}
678 nodetagscache = {}
679 for t, n in self._tagscache.tags.iteritems():
679 for t, n in self._tagscache.tags.iteritems():
680 nodetagscache.setdefault(n, []).append(t)
680 nodetagscache.setdefault(n, []).append(t)
681 for tags in nodetagscache.itervalues():
681 for tags in nodetagscache.itervalues():
682 tags.sort()
682 tags.sort()
683 self._tagscache.nodetagscache = nodetagscache
683 self._tagscache.nodetagscache = nodetagscache
684 return self._tagscache.nodetagscache.get(node, [])
684 return self._tagscache.nodetagscache.get(node, [])
685
685
686 def nodebookmarks(self, node):
686 def nodebookmarks(self, node):
687 marks = []
687 marks = []
688 for bookmark, n in self._bookmarks.iteritems():
688 for bookmark, n in self._bookmarks.iteritems():
689 if n == node:
689 if n == node:
690 marks.append(bookmark)
690 marks.append(bookmark)
691 return sorted(marks)
691 return sorted(marks)
692
692
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the cache for the current filter level first, then serve
        # the entry matching our repoview filter
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
698
698
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            # translate the cache's KeyError into a user-facing error
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
705
705
706 def lookup(self, key):
706 def lookup(self, key):
707 return self[key].node()
707 return self[key].node()
708
708
709 def lookupbranch(self, key, remote=None):
709 def lookupbranch(self, key, remote=None):
710 repo = remote or self
710 repo = remote or self
711 if key in repo.branchmap():
711 if key in repo.branchmap():
712 return key
712 return key
713
713
714 repo = (remote and remote.local()) and remote or self
714 repo = (remote and remote.local()) and remote or self
715 return repo[key].branch()
715 return repo[key].branch()
716
716
717 def known(self, nodes):
717 def known(self, nodes):
718 nm = self.changelog.nodemap
718 nm = self.changelog.nodemap
719 pc = self._phasecache
719 pc = self._phasecache
720 result = []
720 result = []
721 for n in nodes:
721 for n in nodes:
722 r = nm.get(n)
722 r = nm.get(n)
723 resp = not (r is None or pc.phase(self, r) >= phases.secret)
723 resp = not (r is None or pc.phase(self, r) >= phases.secret)
724 result.append(resp)
724 result.append(resp)
725 return result
725 return result
726
726
    def local(self):
        # callers use the return value both as a boolean "is this local?"
        # test and as the local repository object (see lookupbranch);
        # statichttprepo overrides this (see cancopy's comment below)
        return self
729
729
    def cancopy(self):
        """Report whether this repository may be cloned by direct file copy."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        # non-publishing repos can always be hard-copied
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
738
738
    def join(self, f):
        # resolve f against self.path — presumably the repository's private
        # (.hg) directory, as opposed to wjoin's working-directory root;
        # TODO(review) confirm against the constructor
        return os.path.join(self.path, f)
741
741
    def wjoin(self, f):
        """Return ``f`` joined under the working directory root."""
        return os.path.join(self.root, f)
744
744
745 def file(self, f):
745 def file(self, f):
746 if f[0] == '/':
746 if f[0] == '/':
747 f = f[1:]
747 f = f[1:]
748 return filelog.filelog(self.sopener, f)
748 return filelog.filelog(self.sopener, f)
749
749
    def changectx(self, changeid):
        """Return the changectx for ``changeid`` (alias for self[changeid])."""
        return self[changeid]
752
752
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid None selects the working directory context
        # (cf. self[None] elsewhere in this class)
        return self[changeid].parents()
756
756
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents and fix up dirstate copy
        records, which the dirstate itself cannot adjust because doing so
        requires the parents' manifests."""
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    # drop copy information when neither destination nor
                    # source is in the first parent
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
771
771
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx for ``path``.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
776
776
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
779
779
    def pathto(self, f, cwd=None):
        """Render repo path ``f`` relative to ``cwd`` (dirstate delegation)."""
        return self.dirstate.pathto(f, cwd)
782
782
    def wfile(self, f, mode='r'):
        """Open working-directory file ``f`` with the given ``mode``."""
        return self.wopener(f, mode)
785
785
    def _link(self, f):
        """True if working-directory file ``f`` is a symlink."""
        return self.wvfs.islink(f)
788
788
789 def _loadfilter(self, filter):
789 def _loadfilter(self, filter):
790 if filter not in self.filterpats:
790 if filter not in self.filterpats:
791 l = []
791 l = []
792 for pat, cmd in self.ui.configitems(filter):
792 for pat, cmd in self.ui.configitems(filter):
793 if cmd == '!':
793 if cmd == '!':
794 continue
794 continue
795 mf = matchmod.match(self.root, '', [pat])
795 mf = matchmod.match(self.root, '', [pat])
796 fn = None
796 fn = None
797 params = cmd
797 params = cmd
798 for name, filterfn in self._datafilters.iteritems():
798 for name, filterfn in self._datafilters.iteritems():
799 if cmd.startswith(name):
799 if cmd.startswith(name):
800 fn = filterfn
800 fn = filterfn
801 params = cmd[len(name):].lstrip()
801 params = cmd[len(name):].lstrip()
802 break
802 break
803 if not fn:
803 if not fn:
804 fn = lambda s, c, **kwargs: util.filter(s, c)
804 fn = lambda s, c, **kwargs: util.filter(s, c)
805 # Wrap old filters not supporting keyword arguments
805 # Wrap old filters not supporting keyword arguments
806 if not inspect.getargspec(fn)[2]:
806 if not inspect.getargspec(fn)[2]:
807 oldfn = fn
807 oldfn = fn
808 fn = lambda s, c, **kwargs: oldfn(s, c)
808 fn = lambda s, c, **kwargs: oldfn(s, c)
809 l.append((mf, fn, params))
809 l.append((mf, fn, params))
810 self.filterpats[filter] = l
810 self.filterpats[filter] = l
811 return self.filterpats[filter]
811 return self.filterpats[filter]
812
812
813 def _filter(self, filterpats, filename, data):
813 def _filter(self, filterpats, filename, data):
814 for mf, fn, cmd in filterpats:
814 for mf, fn, cmd in filterpats:
815 if mf(filename):
815 if mf(filename):
816 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
816 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
817 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
817 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
818 break
818 break
819
819
820 return data
820 return data
821
821
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading data out of the working directory
        # (see wread)
        return self._loadfilter('encode')
825
825
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing data to the working directory
        # (see wwrite/wwritedata)
        return self._loadfilter('decode')
829
829
    def adddatafilter(self, name, filter):
        # register a named data filter usable from encode/decode config
        # entries (matched by prefix in _loadfilter)
        self._datafilters[name] = filter
832
832
833 def wread(self, filename):
833 def wread(self, filename):
834 if self._link(filename):
834 if self._link(filename):
835 data = self.wvfs.readlink(filename)
835 data = self.wvfs.readlink(filename)
836 else:
836 else:
837 data = self.wopener.read(filename)
837 data = self.wopener.read(filename)
838 return self._filter(self._encodefilterpats, filename, data)
838 return self._filter(self._encodefilterpats, filename, data)
839
839
    def wwrite(self, filename, data, flags):
        """Write ``data`` to working-directory file ``filename`` after the
        decode filters, honoring 'l' (symlink) and 'x' (executable) in
        ``flags``."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # a symlink's "content" is its target
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
848
848
    def wwritedata(self, filename, data):
        """Return ``data`` run through the decode filters for ``filename``
        without writing anything to disk."""
        return self._filter(self._decodefilterpats, filename, data)
851
851
852 def transaction(self, desc, report=None):
852 def transaction(self, desc, report=None):
853 tr = self._transref and self._transref() or None
853 tr = self._transref and self._transref() or None
854 if tr and tr.running():
854 if tr and tr.running():
855 return tr.nest()
855 return tr.nest()
856
856
857 # abort here if the journal already exists
857 # abort here if the journal already exists
858 if self.svfs.exists("journal"):
858 if self.svfs.exists("journal"):
859 raise error.RepoError(
859 raise error.RepoError(
860 _("abandoned transaction found"),
860 _("abandoned transaction found"),
861 hint=_("run 'hg recover' to clean up transaction"))
861 hint=_("run 'hg recover' to clean up transaction"))
862
862
863 def onclose():
863 def onclose():
864 self.store.write(tr)
864 self.store.write(tr)
865
865
866 self._writejournal(desc)
866 self._writejournal(desc)
867 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
867 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
868 rp = report and report or self.ui.warn
868 rp = report and report or self.ui.warn
869 tr = transaction.transaction(rp, self.sopener,
869 tr = transaction.transaction(rp, self.sopener,
870 "journal",
870 "journal",
871 aftertrans(renames),
871 aftertrans(renames),
872 self.store.createmode,
872 self.store.createmode,
873 onclose)
873 onclose)
874 self._transref = weakref.ref(tr)
874 self._transref = weakref.ref(tr)
875 return tr
875 return tr
876
876
877 def _journalfiles(self):
877 def _journalfiles(self):
878 return ((self.svfs, 'journal'),
878 return ((self.svfs, 'journal'),
879 (self.vfs, 'journal.dirstate'),
879 (self.vfs, 'journal.dirstate'),
880 (self.vfs, 'journal.branch'),
880 (self.vfs, 'journal.branch'),
881 (self.vfs, 'journal.desc'),
881 (self.vfs, 'journal.desc'),
882 (self.vfs, 'journal.bookmarks'),
882 (self.vfs, 'journal.bookmarks'),
883 (self.svfs, 'journal.phaseroots'))
883 (self.svfs, 'journal.phaseroots'))
884
884
    def undofiles(self):
        """Return (vfs, name) pairs for the undo.* counterparts of the
        journal files."""
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
887
887
    def _writejournal(self, desc):
        """Snapshot mutable state files into journal.* before a transaction
        starts; these copies back later rollback/recover operations."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # first line: current repo length, second line: description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                          self.sopener.tryread("phaseroots"))
899
899
    def recover(self):
        """Roll back an interrupted transaction, if one left a journal.

        Returns True when a journal was found and rolled back, False
        otherwise.  Takes the store lock for the duration."""
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
914
914
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction (see _rollback for the heavy lifting).

        Returns 1 when no undo information exists.  Acquires the working
        directory lock and then the store lock, in that order."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
927
927
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Restore the repository to the state captured in the undo files.

        dryrun: only report what would be rolled back and return 0
        force: skip the safety check that refuses to roll back a commit
               while the working directory is not on tip

        Returns 0 on success (including dry runs)."""
        ui = self.ui
        try:
            # undo.desc layout: old repo length, description, optional
            # extra detail (one per line)
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc available: older or partial undo information
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            # rolling back a commit other than the working directory
            # parent could silently discard working copy changes
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        # remember the dirstate parents before the changelog shrinks
        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore dirstate/branch if a dirstate parent was stripped
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                # undo.branch missing: best effort, keep current branch
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
993
993
    def invalidatecaches(self):
        """Forget cached tags, branch heads and volatile sets so they are
        recomputed from the changelog on next access."""

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
1002
1002
    def invalidatevolatilesets(self):
        """Drop caches that depend on the filtered/obsolete revision sets."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1006
1006
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own filecache'd properties first
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    # property never cached; nothing to drop
                    pass
            delattr(self.unfiltered(), 'dirstate')
1023
1023
1024 def invalidate(self):
1024 def invalidate(self):
1025 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1025 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1026 for k in self._filecache:
1026 for k in self._filecache:
1027 # dirstate is invalidated separately in invalidatedirstate()
1027 # dirstate is invalidated separately in invalidatedirstate()
1028 if k == 'dirstate':
1028 if k == 'dirstate':
1029 continue
1029 continue
1030
1030
1031 try:
1031 try:
1032 delattr(unfiltered, k)
1032 delattr(unfiltered, k)
1033 except AttributeError:
1033 except AttributeError:
1034 pass
1034 pass
1035 self.invalidatecaches()
1035 self.invalidatecaches()
1036 self.store.invalidatecaches()
1036 self.store.invalidatecaches()
1037
1037
1038 def invalidateall(self):
1038 def invalidateall(self):
1039 '''Fully invalidates both store and non-store parts, causing the
1039 '''Fully invalidates both store and non-store parts, causing the
1040 subsequent operation to reread any outside changes.'''
1040 subsequent operation to reread any outside changes.'''
1041 # extension should hook this to invalidate its caches
1041 # extension should hook this to invalidate its caches
1042 self.invalidate()
1042 self.invalidate()
1043 self.invalidatedirstate()
1043 self.invalidatedirstate()
1044
1044
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* inside *vfs*.

        A non-blocking acquire is attempted first. If the lock is held
        elsewhere and *wait* is true, the user is warned and the acquire
        is retried with the configured ui.timeout (600s by default);
        with *wait* false the error.LockHeld propagates to the caller.
        *releasefn* is invoked when the lock is released, *acquirefn*
        (if not None) right after acquisition. *desc* is a human-readable
        description used in warning messages. Returns the lock object.
        """
        try:
            # timeout 0: fail immediately if someone else holds the lock
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1061
1061
1062 def _afterlock(self, callback):
1062 def _afterlock(self, callback):
1063 """add a callback to the current repository lock.
1063 """add a callback to the current repository lock.
1064
1064
1065 The callback will be executed on lock release."""
1065 The callback will be executed on lock release."""
1066 l = self._lockref and self._lockref()
1066 l = self._lockref and self._lockref()
1067 if l:
1067 if l:
1068 l.postrelease.append(callback)
1068 l.postrelease.append(callback)
1069 else:
1069 else:
1070 callback()
1070 callback()
1071
1071
1072 def lock(self, wait=True):
1072 def lock(self, wait=True):
1073 '''Lock the repository store (.hg/store) and return a weak reference
1073 '''Lock the repository store (.hg/store) and return a weak reference
1074 to the lock. Use this before modifying the store (e.g. committing or
1074 to the lock. Use this before modifying the store (e.g. committing or
1075 stripping). If you are opening a transaction, get a lock as well.)'''
1075 stripping). If you are opening a transaction, get a lock as well.)'''
1076 l = self._lockref and self._lockref()
1076 l = self._lockref and self._lockref()
1077 if l is not None and l.held:
1077 if l is not None and l.held:
1078 l.lock()
1078 l.lock()
1079 return l
1079 return l
1080
1080
1081 def unlock():
1081 def unlock():
1082 if hasunfilteredcache(self, '_phasecache'):
1082 if hasunfilteredcache(self, '_phasecache'):
1083 self._phasecache.write()
1083 self._phasecache.write()
1084 for k, ce in self._filecache.items():
1084 for k, ce in self._filecache.items():
1085 if k == 'dirstate' or k not in self.__dict__:
1085 if k == 'dirstate' or k not in self.__dict__:
1086 continue
1086 continue
1087 ce.refresh()
1087 ce.refresh()
1088
1088
1089 l = self._lock(self.svfs, "lock", wait, unlock,
1089 l = self._lock(self.svfs, "lock", wait, unlock,
1090 self.invalidate, _('repository %s') % self.origroot)
1090 self.invalidate, _('repository %s') % self.origroot)
1091 self._lockref = weakref.ref(l)
1091 self._lockref = weakref.ref(l)
1092 return l
1092 return l
1093
1093
1094 def wlock(self, wait=True):
1094 def wlock(self, wait=True):
1095 '''Lock the non-store parts of the repository (everything under
1095 '''Lock the non-store parts of the repository (everything under
1096 .hg except .hg/store) and return a weak reference to the lock.
1096 .hg except .hg/store) and return a weak reference to the lock.
1097 Use this before modifying files in .hg.'''
1097 Use this before modifying files in .hg.'''
1098 l = self._wlockref and self._wlockref()
1098 l = self._wlockref and self._wlockref()
1099 if l is not None and l.held:
1099 if l is not None and l.held:
1100 l.lock()
1100 l.lock()
1101 return l
1101 return l
1102
1102
1103 def unlock():
1103 def unlock():
1104 self.dirstate.write()
1104 self.dirstate.write()
1105 self._filecache['dirstate'].refresh()
1105 self._filecache['dirstate'].refresh()
1106
1106
1107 l = self._lock(self.vfs, "wlock", wait, unlock,
1107 l = self._lock(self.vfs, "wlock", wait, unlock,
1108 self.invalidatedirstate, _('working directory of %s') %
1108 self.invalidatedirstate, _('working directory of %s') %
1109 self.origroot)
1109 self.origroot)
1110 self._wlockref = weakref.ref(l)
1110 self._wlockref = weakref.ref(l)
1111 return l
1111 return l
1112
1112
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the filectx being committed; manifest1 and manifest2 are
        the parent manifests. linkrev is the changelog revision the new
        filelog entry will link to, tr the active transaction, and
        changelist a list to which the name of every file that actually
        changed is appended.

        Returns the node of the new filelog revision, or the first
        parent's node when the file content is unchanged.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        # fparent2o remembers the original second parent so the
        # flags-only-change case below can be detected
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # file is new on the first parent's side: adopt the second
            # parent as the sole parent
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1194
1194
1195 @unfilteredmethod
1195 @unfilteredmethod
1196 def commit(self, text="", user=None, date=None, match=None, force=False,
1196 def commit(self, text="", user=None, date=None, match=None, force=False,
1197 editor=False, extra={}):
1197 editor=False, extra={}):
1198 """Add a new revision to current repository.
1198 """Add a new revision to current repository.
1199
1199
1200 Revision information is gathered from the working directory,
1200 Revision information is gathered from the working directory,
1201 match can be used to filter the committed files. If editor is
1201 match can be used to filter the committed files. If editor is
1202 supplied, it is called to get a commit message.
1202 supplied, it is called to get a commit message.
1203 """
1203 """
1204
1204
1205 def fail(f, msg):
1205 def fail(f, msg):
1206 raise util.Abort('%s: %s' % (f, msg))
1206 raise util.Abort('%s: %s' % (f, msg))
1207
1207
1208 if not match:
1208 if not match:
1209 match = matchmod.always(self.root, '')
1209 match = matchmod.always(self.root, '')
1210
1210
1211 if not force:
1211 if not force:
1212 vdirs = []
1212 vdirs = []
1213 match.explicitdir = vdirs.append
1213 match.explicitdir = vdirs.append
1214 match.bad = fail
1214 match.bad = fail
1215
1215
1216 wlock = self.wlock()
1216 wlock = self.wlock()
1217 try:
1217 try:
1218 wctx = self[None]
1218 wctx = self[None]
1219 merge = len(wctx.parents()) > 1
1219 merge = len(wctx.parents()) > 1
1220
1220
1221 if (not force and merge and match and
1221 if (not force and merge and match and
1222 (match.files() or match.anypats())):
1222 (match.files() or match.anypats())):
1223 raise util.Abort(_('cannot partially commit a merge '
1223 raise util.Abort(_('cannot partially commit a merge '
1224 '(do not specify files or patterns)'))
1224 '(do not specify files or patterns)'))
1225
1225
1226 changes = self.status(match=match, clean=force)
1226 changes = self.status(match=match, clean=force)
1227 if force:
1227 if force:
1228 changes[0].extend(changes[6]) # mq may commit unchanged files
1228 changes[0].extend(changes[6]) # mq may commit unchanged files
1229
1229
1230 # check subrepos
1230 # check subrepos
1231 subs = []
1231 subs = []
1232 commitsubs = set()
1232 commitsubs = set()
1233 newstate = wctx.substate.copy()
1233 newstate = wctx.substate.copy()
1234 # only manage subrepos and .hgsubstate if .hgsub is present
1234 # only manage subrepos and .hgsubstate if .hgsub is present
1235 if '.hgsub' in wctx:
1235 if '.hgsub' in wctx:
1236 # we'll decide whether to track this ourselves, thanks
1236 # we'll decide whether to track this ourselves, thanks
1237 for c in changes[:3]:
1237 for c in changes[:3]:
1238 if '.hgsubstate' in c:
1238 if '.hgsubstate' in c:
1239 c.remove('.hgsubstate')
1239 c.remove('.hgsubstate')
1240
1240
1241 # compare current state to last committed state
1241 # compare current state to last committed state
1242 # build new substate based on last committed state
1242 # build new substate based on last committed state
1243 oldstate = wctx.p1().substate
1243 oldstate = wctx.p1().substate
1244 for s in sorted(newstate.keys()):
1244 for s in sorted(newstate.keys()):
1245 if not match(s):
1245 if not match(s):
1246 # ignore working copy, use old state if present
1246 # ignore working copy, use old state if present
1247 if s in oldstate:
1247 if s in oldstate:
1248 newstate[s] = oldstate[s]
1248 newstate[s] = oldstate[s]
1249 continue
1249 continue
1250 if not force:
1250 if not force:
1251 raise util.Abort(
1251 raise util.Abort(
1252 _("commit with new subrepo %s excluded") % s)
1252 _("commit with new subrepo %s excluded") % s)
1253 if wctx.sub(s).dirty(True):
1253 if wctx.sub(s).dirty(True):
1254 if not self.ui.configbool('ui', 'commitsubrepos'):
1254 if not self.ui.configbool('ui', 'commitsubrepos'):
1255 raise util.Abort(
1255 raise util.Abort(
1256 _("uncommitted changes in subrepo %s") % s,
1256 _("uncommitted changes in subrepo %s") % s,
1257 hint=_("use --subrepos for recursive commit"))
1257 hint=_("use --subrepos for recursive commit"))
1258 subs.append(s)
1258 subs.append(s)
1259 commitsubs.add(s)
1259 commitsubs.add(s)
1260 else:
1260 else:
1261 bs = wctx.sub(s).basestate()
1261 bs = wctx.sub(s).basestate()
1262 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1262 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1263 if oldstate.get(s, (None, None, None))[1] != bs:
1263 if oldstate.get(s, (None, None, None))[1] != bs:
1264 subs.append(s)
1264 subs.append(s)
1265
1265
1266 # check for removed subrepos
1266 # check for removed subrepos
1267 for p in wctx.parents():
1267 for p in wctx.parents():
1268 r = [s for s in p.substate if s not in newstate]
1268 r = [s for s in p.substate if s not in newstate]
1269 subs += [s for s in r if match(s)]
1269 subs += [s for s in r if match(s)]
1270 if subs:
1270 if subs:
1271 if (not match('.hgsub') and
1271 if (not match('.hgsub') and
1272 '.hgsub' in (wctx.modified() + wctx.added())):
1272 '.hgsub' in (wctx.modified() + wctx.added())):
1273 raise util.Abort(
1273 raise util.Abort(
1274 _("can't commit subrepos without .hgsub"))
1274 _("can't commit subrepos without .hgsub"))
1275 changes[0].insert(0, '.hgsubstate')
1275 changes[0].insert(0, '.hgsubstate')
1276
1276
1277 elif '.hgsub' in changes[2]:
1277 elif '.hgsub' in changes[2]:
1278 # clean up .hgsubstate when .hgsub is removed
1278 # clean up .hgsubstate when .hgsub is removed
1279 if ('.hgsubstate' in wctx and
1279 if ('.hgsubstate' in wctx and
1280 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1280 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1281 changes[2].insert(0, '.hgsubstate')
1281 changes[2].insert(0, '.hgsubstate')
1282
1282
1283 # make sure all explicit patterns are matched
1283 # make sure all explicit patterns are matched
1284 if not force and match.files():
1284 if not force and match.files():
1285 matched = set(changes[0] + changes[1] + changes[2])
1285 matched = set(changes[0] + changes[1] + changes[2])
1286
1286
1287 for f in match.files():
1287 for f in match.files():
1288 f = self.dirstate.normalize(f)
1288 f = self.dirstate.normalize(f)
1289 if f == '.' or f in matched or f in wctx.substate:
1289 if f == '.' or f in matched or f in wctx.substate:
1290 continue
1290 continue
1291 if f in changes[3]: # missing
1291 if f in changes[3]: # missing
1292 fail(f, _('file not found!'))
1292 fail(f, _('file not found!'))
1293 if f in vdirs: # visited directory
1293 if f in vdirs: # visited directory
1294 d = f + '/'
1294 d = f + '/'
1295 for mf in matched:
1295 for mf in matched:
1296 if mf.startswith(d):
1296 if mf.startswith(d):
1297 break
1297 break
1298 else:
1298 else:
1299 fail(f, _("no match under directory!"))
1299 fail(f, _("no match under directory!"))
1300 elif f not in self.dirstate:
1300 elif f not in self.dirstate:
1301 fail(f, _("file not tracked!"))
1301 fail(f, _("file not tracked!"))
1302
1302
1303 cctx = context.workingctx(self, text, user, date, extra, changes)
1303 cctx = context.workingctx(self, text, user, date, extra, changes)
1304
1304
1305 if (not force and not extra.get("close") and not merge
1305 if (not force and not extra.get("close") and not merge
1306 and not cctx.files()
1306 and not cctx.files()
1307 and wctx.branch() == wctx.p1().branch()):
1307 and wctx.branch() == wctx.p1().branch()):
1308 return None
1308 return None
1309
1309
1310 if merge and cctx.deleted():
1310 if merge and cctx.deleted():
1311 raise util.Abort(_("cannot commit merge with missing files"))
1311 raise util.Abort(_("cannot commit merge with missing files"))
1312
1312
1313 ms = mergemod.mergestate(self)
1313 ms = mergemod.mergestate(self)
1314 for f in changes[0]:
1314 for f in changes[0]:
1315 if f in ms and ms[f] == 'u':
1315 if f in ms and ms[f] == 'u':
1316 raise util.Abort(_("unresolved merge conflicts "
1316 raise util.Abort(_("unresolved merge conflicts "
1317 "(see hg help resolve)"))
1317 "(see hg help resolve)"))
1318
1318
1319 if editor:
1319 if editor:
1320 cctx._text = editor(self, cctx, subs)
1320 cctx._text = editor(self, cctx, subs)
1321 edited = (text != cctx._text)
1321 edited = (text != cctx._text)
1322
1322
1323 # Save commit message in case this transaction gets rolled back
1323 # Save commit message in case this transaction gets rolled back
1324 # (e.g. by a pretxncommit hook). Leave the content alone on
1324 # (e.g. by a pretxncommit hook). Leave the content alone on
1325 # the assumption that the user will use the same editor again.
1325 # the assumption that the user will use the same editor again.
1326 msgfn = self.savecommitmessage(cctx._text)
1326 msgfn = self.savecommitmessage(cctx._text)
1327
1327
1328 # commit subs and write new state
1328 # commit subs and write new state
1329 if subs:
1329 if subs:
1330 for s in sorted(commitsubs):
1330 for s in sorted(commitsubs):
1331 sub = wctx.sub(s)
1331 sub = wctx.sub(s)
1332 self.ui.status(_('committing subrepository %s\n') %
1332 self.ui.status(_('committing subrepository %s\n') %
1333 subrepo.subrelpath(sub))
1333 subrepo.subrelpath(sub))
1334 sr = sub.commit(cctx._text, user, date)
1334 sr = sub.commit(cctx._text, user, date)
1335 newstate[s] = (newstate[s][0], sr)
1335 newstate[s] = (newstate[s][0], sr)
1336 subrepo.writestate(self, newstate)
1336 subrepo.writestate(self, newstate)
1337
1337
1338 p1, p2 = self.dirstate.parents()
1338 p1, p2 = self.dirstate.parents()
1339 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1339 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1340 try:
1340 try:
1341 self.hook("precommit", throw=True, parent1=hookp1,
1341 self.hook("precommit", throw=True, parent1=hookp1,
1342 parent2=hookp2)
1342 parent2=hookp2)
1343 ret = self.commitctx(cctx, True)
1343 ret = self.commitctx(cctx, True)
1344 except: # re-raises
1344 except: # re-raises
1345 if edited:
1345 if edited:
1346 self.ui.write(
1346 self.ui.write(
1347 _('note: commit message saved in %s\n') % msgfn)
1347 _('note: commit message saved in %s\n') % msgfn)
1348 raise
1348 raise
1349
1349
1350 # update bookmarks, dirstate and mergestate
1350 # update bookmarks, dirstate and mergestate
1351 bookmarks.update(self, [p1, p2], ret)
1351 bookmarks.update(self, [p1, p2], ret)
1352 cctx.markcommitted(ret)
1352 cctx.markcommitted(ret)
1353 ms.reset()
1353 ms.reset()
1354 finally:
1354 finally:
1355 wlock.release()
1355 wlock.release()
1356
1356
1357 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1357 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1358 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1358 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1359 self._afterlock(commithook)
1359 self._afterlock(commithook)
1360 return ret
1360 return ret
1361
1361
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When error is True, IOErrors raised while committing individual
        files are always fatal; otherwise a file that went missing
        (ENOENT) is silently recorded as removed. Returns the node of
        the new changeset.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # the file vanished: record it as removed
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file-level changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1443
1443
1444 @unfilteredmethod
1444 @unfilteredmethod
1445 def destroying(self):
1445 def destroying(self):
1446 '''Inform the repository that nodes are about to be destroyed.
1446 '''Inform the repository that nodes are about to be destroyed.
1447 Intended for use by strip and rollback, so there's a common
1447 Intended for use by strip and rollback, so there's a common
1448 place for anything that has to be done before destroying history.
1448 place for anything that has to be done before destroying history.
1449
1449
1450 This is mostly useful for saving state that is in memory and waiting
1450 This is mostly useful for saving state that is in memory and waiting
1451 to be flushed when the current lock is released. Because a call to
1451 to be flushed when the current lock is released. Because a call to
1452 destroyed is imminent, the repo will be invalidated causing those
1452 destroyed is imminent, the repo will be invalidated causing those
1453 changes to stay in memory (waiting for the next unlock), or vanish
1453 changes to stay in memory (waiting for the next unlock), or vanish
1454 completely.
1454 completely.
1455 '''
1455 '''
1456 # When using the same lock to commit and strip, the phasecache is left
1456 # When using the same lock to commit and strip, the phasecache is left
1457 # dirty after committing. Then when we strip, the repo is invalidated,
1457 # dirty after committing. Then when we strip, the repo is invalidated,
1458 # causing those changes to disappear.
1458 # causing those changes to disappear.
1459 if '_phasecache' in vars(self):
1459 if '_phasecache' in vars(self):
1460 self._phasecache.write()
1460 self._phasecache.write()
1461
1461
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        The order of operations below matters: the phasecache is fixed
        up and written first, then the branch cache is refreshed, and
        the repo is invalidated last.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1495
1495
1496 def walk(self, match, node=None):
1496 def walk(self, match, node=None):
1497 '''
1497 '''
1498 walk recursively through the directory tree or a given
1498 walk recursively through the directory tree or a given
1499 changeset, finding all files matched by the match
1499 changeset, finding all files matched by the match
1500 function
1500 function
1501 '''
1501 '''
1502 return self[node].walk(match)
1502 return self[node].walk(match)
1503
1503
1504 def status(self, node1='.', node2=None, match=None,
1504 def status(self, node1='.', node2=None, match=None,
1505 ignored=False, clean=False, unknown=False,
1505 ignored=False, clean=False, unknown=False,
1506 listsubrepos=False):
1506 listsubrepos=False):
1507 """return status of files between two nodes or node and working
1507 """return status of files between two nodes or node and working
1508 directory.
1508 directory.
1509
1509
1510 If node1 is None, use the first dirstate parent instead.
1510 If node1 is None, use the first dirstate parent instead.
1511 If node2 is None, compare node1 with working directory.
1511 If node2 is None, compare node1 with working directory.
1512 """
1512 """
1513
1513
1514 ctx1 = self[node1]
1514 ctx1 = self[node1]
1515 ctx2 = self[node2]
1515 ctx2 = self[node2]
1516
1516
1517 # This next code block is, admittedly, fragile logic that tests for
1517 # This next code block is, admittedly, fragile logic that tests for
1518 # reversing the contexts and wouldn't need to exist if it weren't for
1518 # reversing the contexts and wouldn't need to exist if it weren't for
1519 # the fast (and common) code path of comparing the working directory
1519 # the fast (and common) code path of comparing the working directory
1520 # with its first parent.
1520 # with its first parent.
1521 #
1521 #
1522 # What we're aiming for here is the ability to call:
1522 # What we're aiming for here is the ability to call:
1523 #
1523 #
1524 # workingctx.status(parentctx)
1524 # workingctx.status(parentctx)
1525 #
1525 #
1526 # If we always built the manifest for each context and compared those,
1526 # If we always built the manifest for each context and compared those,
1527 # then we'd be done. But the special case of the above call means we
1527 # then we'd be done. But the special case of the above call means we
1528 # just copy the manifest of the parent.
1528 # just copy the manifest of the parent.
1529 reversed = False
1529 reversed = False
1530 if (not isinstance(ctx1, context.changectx)
1530 if (not isinstance(ctx1, context.changectx)
1531 and isinstance(ctx2, context.changectx)):
1531 and isinstance(ctx2, context.changectx)):
1532 reversed = True
1532 reversed = True
1533 ctx1, ctx2 = ctx2, ctx1
1533 ctx1, ctx2 = ctx2, ctx1
1534
1534
1535 working = ctx2.rev() is None
1535 working = ctx2.rev() is None
1536 parentworking = working and ctx1 == self['.']
1536 parentworking = working and ctx1 == self['.']
1537 match = match or matchmod.always(self.root, self.getcwd())
1537 match = match or matchmod.always(self.root, self.getcwd())
1538 listignored, listclean, listunknown = ignored, clean, unknown
1538 listignored, listclean, listunknown = ignored, clean, unknown
1539
1539
1540 # load earliest manifest first for caching reasons
1540 # load earliest manifest first for caching reasons
1541 if not working and ctx2.rev() < ctx1.rev():
1541 if not working and ctx2.rev() < ctx1.rev():
1542 ctx2.manifest()
1542 ctx2.manifest()
1543
1543
1544 if not parentworking:
1544 if not parentworking:
1545 def bad(f, msg):
1545 def bad(f, msg):
1546 # 'f' may be a directory pattern from 'match.files()',
1546 # 'f' may be a directory pattern from 'match.files()',
1547 # so 'f not in ctx1' is not enough
1547 # so 'f not in ctx1' is not enough
1548 if f not in ctx1 and f not in ctx1.dirs():
1548 if f not in ctx1 and f not in ctx1.dirs():
1549 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1549 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1550 match.bad = bad
1550 match.bad = bad
1551
1551
1552 r = [[], [], [], [], [], [], []]
1552 r = [[], [], [], [], [], [], []]
1553 r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
1553 r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
1554
1554
1555 if not parentworking:
1555 if not parentworking:
1556 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
1556 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
1557 listunknown)
1557 listunknown)
1558
1558
1559 r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
1559 r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
1560 listunknown)
1560 listunknown)
1561 modified, added, removed, deleted, unknown, ignored, clean = r
1562
1561
1563 if reversed:
1562 if reversed:
1564 added, removed = removed, added
1563 # since we are maintaining whether we reversed ctx1 and ctx2 (due
1565
1564 # to comparing the workingctx with its parent), we need to switch
1566 r = modified, added, removed, deleted, unknown, ignored, clean
1565 # back added files (r[1]) and removed files (r[2])
1566 r[1], r[2] = r[2], r[1]
1567
1567
1568 if listsubrepos:
1568 if listsubrepos:
1569 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1569 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1570 if working:
1570 if working:
1571 rev2 = None
1571 rev2 = None
1572 else:
1572 else:
1573 rev2 = ctx2.substate[subpath][1]
1573 rev2 = ctx2.substate[subpath][1]
1574 try:
1574 try:
1575 submatch = matchmod.narrowmatcher(subpath, match)
1575 submatch = matchmod.narrowmatcher(subpath, match)
1576 s = sub.status(rev2, match=submatch, ignored=listignored,
1576 s = sub.status(rev2, match=submatch, ignored=listignored,
1577 clean=listclean, unknown=listunknown,
1577 clean=listclean, unknown=listunknown,
1578 listsubrepos=True)
1578 listsubrepos=True)
1579 for rfiles, sfiles in zip(r, s):
1579 for rfiles, sfiles in zip(r, s):
1580 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1580 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1581 except error.LookupError:
1581 except error.LookupError:
1582 self.ui.status(_("skipping missing subrepository: %s\n")
1582 self.ui.status(_("skipping missing subrepository: %s\n")
1583 % subpath)
1583 % subpath)
1584
1584
1585 for l in r:
1585 for l in r:
1586 l.sort()
1586 l.sort()
1587 return r
1587 return r
1588
1588
1589 def heads(self, start=None):
1589 def heads(self, start=None):
1590 heads = self.changelog.heads(start)
1590 heads = self.changelog.heads(start)
1591 # sort the output in rev descending order
1591 # sort the output in rev descending order
1592 return sorted(heads, key=self.changelog.rev, reverse=True)
1592 return sorted(heads, key=self.changelog.rev, reverse=True)
1593
1593
1594 def branchheads(self, branch=None, start=None, closed=False):
1594 def branchheads(self, branch=None, start=None, closed=False):
1595 '''return a (possibly filtered) list of heads for the given branch
1595 '''return a (possibly filtered) list of heads for the given branch
1596
1596
1597 Heads are returned in topological order, from newest to oldest.
1597 Heads are returned in topological order, from newest to oldest.
1598 If branch is None, use the dirstate branch.
1598 If branch is None, use the dirstate branch.
1599 If start is not None, return only heads reachable from start.
1599 If start is not None, return only heads reachable from start.
1600 If closed is True, return heads that are marked as closed as well.
1600 If closed is True, return heads that are marked as closed as well.
1601 '''
1601 '''
1602 if branch is None:
1602 if branch is None:
1603 branch = self[None].branch()
1603 branch = self[None].branch()
1604 branches = self.branchmap()
1604 branches = self.branchmap()
1605 if branch not in branches:
1605 if branch not in branches:
1606 return []
1606 return []
1607 # the cache returns heads ordered lowest to highest
1607 # the cache returns heads ordered lowest to highest
1608 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1608 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1609 if start is not None:
1609 if start is not None:
1610 # filter out the heads that cannot be reached from startrev
1610 # filter out the heads that cannot be reached from startrev
1611 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1611 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1612 bheads = [h for h in bheads if h in fbheads]
1612 bheads = [h for h in bheads if h in fbheads]
1613 return bheads
1613 return bheads
1614
1614
1615 def branches(self, nodes):
1615 def branches(self, nodes):
1616 if not nodes:
1616 if not nodes:
1617 nodes = [self.changelog.tip()]
1617 nodes = [self.changelog.tip()]
1618 b = []
1618 b = []
1619 for n in nodes:
1619 for n in nodes:
1620 t = n
1620 t = n
1621 while True:
1621 while True:
1622 p = self.changelog.parents(n)
1622 p = self.changelog.parents(n)
1623 if p[1] != nullid or p[0] == nullid:
1623 if p[1] != nullid or p[0] == nullid:
1624 b.append((t, n, p[0], p[1]))
1624 b.append((t, n, p[0], p[1]))
1625 break
1625 break
1626 n = p[0]
1626 n = p[0]
1627 return b
1627 return b
1628
1628
1629 def between(self, pairs):
1629 def between(self, pairs):
1630 r = []
1630 r = []
1631
1631
1632 for top, bottom in pairs:
1632 for top, bottom in pairs:
1633 n, l, i = top, [], 0
1633 n, l, i = top, [], 0
1634 f = 1
1634 f = 1
1635
1635
1636 while n != bottom and n != nullid:
1636 while n != bottom and n != nullid:
1637 p = self.changelog.parents(n)[0]
1637 p = self.changelog.parents(n)[0]
1638 if i == f:
1638 if i == f:
1639 l.append(n)
1639 l.append(n)
1640 f = f * 2
1640 f = f * 2
1641 n = p
1641 n = p
1642 i += 1
1642 i += 1
1643
1643
1644 r.append(l)
1644 r.append(l)
1645
1645
1646 return r
1646 return r
1647
1647
1648 def pull(self, remote, heads=None, force=False):
1648 def pull(self, remote, heads=None, force=False):
1649 return exchange.pull (self, remote, heads, force)
1649 return exchange.pull (self, remote, heads, force)
1650
1650
1651 def checkpush(self, pushop):
1651 def checkpush(self, pushop):
1652 """Extensions can override this function if additional checks have
1652 """Extensions can override this function if additional checks have
1653 to be performed before pushing, or call it if they override push
1653 to be performed before pushing, or call it if they override push
1654 command.
1654 command.
1655 """
1655 """
1656 pass
1656 pass
1657
1657
1658 @unfilteredpropertycache
1658 @unfilteredpropertycache
1659 def prepushoutgoinghooks(self):
1659 def prepushoutgoinghooks(self):
1660 """Return util.hooks consists of "(repo, remote, outgoing)"
1660 """Return util.hooks consists of "(repo, remote, outgoing)"
1661 functions, which are called before pushing changesets.
1661 functions, which are called before pushing changesets.
1662 """
1662 """
1663 return util.hooks()
1663 return util.hooks()
1664
1664
1665 def push(self, remote, force=False, revs=None, newbranch=False):
1665 def push(self, remote, force=False, revs=None, newbranch=False):
1666 return exchange.push(self, remote, force, revs, newbranch)
1666 return exchange.push(self, remote, force, revs, newbranch)
1667
1667
1668 def stream_in(self, remote, requirements):
1668 def stream_in(self, remote, requirements):
1669 lock = self.lock()
1669 lock = self.lock()
1670 try:
1670 try:
1671 # Save remote branchmap. We will use it later
1671 # Save remote branchmap. We will use it later
1672 # to speed up branchcache creation
1672 # to speed up branchcache creation
1673 rbranchmap = None
1673 rbranchmap = None
1674 if remote.capable("branchmap"):
1674 if remote.capable("branchmap"):
1675 rbranchmap = remote.branchmap()
1675 rbranchmap = remote.branchmap()
1676
1676
1677 fp = remote.stream_out()
1677 fp = remote.stream_out()
1678 l = fp.readline()
1678 l = fp.readline()
1679 try:
1679 try:
1680 resp = int(l)
1680 resp = int(l)
1681 except ValueError:
1681 except ValueError:
1682 raise error.ResponseError(
1682 raise error.ResponseError(
1683 _('unexpected response from remote server:'), l)
1683 _('unexpected response from remote server:'), l)
1684 if resp == 1:
1684 if resp == 1:
1685 raise util.Abort(_('operation forbidden by server'))
1685 raise util.Abort(_('operation forbidden by server'))
1686 elif resp == 2:
1686 elif resp == 2:
1687 raise util.Abort(_('locking the remote repository failed'))
1687 raise util.Abort(_('locking the remote repository failed'))
1688 elif resp != 0:
1688 elif resp != 0:
1689 raise util.Abort(_('the server sent an unknown error code'))
1689 raise util.Abort(_('the server sent an unknown error code'))
1690 self.ui.status(_('streaming all changes\n'))
1690 self.ui.status(_('streaming all changes\n'))
1691 l = fp.readline()
1691 l = fp.readline()
1692 try:
1692 try:
1693 total_files, total_bytes = map(int, l.split(' ', 1))
1693 total_files, total_bytes = map(int, l.split(' ', 1))
1694 except (ValueError, TypeError):
1694 except (ValueError, TypeError):
1695 raise error.ResponseError(
1695 raise error.ResponseError(
1696 _('unexpected response from remote server:'), l)
1696 _('unexpected response from remote server:'), l)
1697 self.ui.status(_('%d files to transfer, %s of data\n') %
1697 self.ui.status(_('%d files to transfer, %s of data\n') %
1698 (total_files, util.bytecount(total_bytes)))
1698 (total_files, util.bytecount(total_bytes)))
1699 handled_bytes = 0
1699 handled_bytes = 0
1700 self.ui.progress(_('clone'), 0, total=total_bytes)
1700 self.ui.progress(_('clone'), 0, total=total_bytes)
1701 start = time.time()
1701 start = time.time()
1702
1702
1703 tr = self.transaction(_('clone'))
1703 tr = self.transaction(_('clone'))
1704 try:
1704 try:
1705 for i in xrange(total_files):
1705 for i in xrange(total_files):
1706 # XXX doesn't support '\n' or '\r' in filenames
1706 # XXX doesn't support '\n' or '\r' in filenames
1707 l = fp.readline()
1707 l = fp.readline()
1708 try:
1708 try:
1709 name, size = l.split('\0', 1)
1709 name, size = l.split('\0', 1)
1710 size = int(size)
1710 size = int(size)
1711 except (ValueError, TypeError):
1711 except (ValueError, TypeError):
1712 raise error.ResponseError(
1712 raise error.ResponseError(
1713 _('unexpected response from remote server:'), l)
1713 _('unexpected response from remote server:'), l)
1714 if self.ui.debugflag:
1714 if self.ui.debugflag:
1715 self.ui.debug('adding %s (%s)\n' %
1715 self.ui.debug('adding %s (%s)\n' %
1716 (name, util.bytecount(size)))
1716 (name, util.bytecount(size)))
1717 # for backwards compat, name was partially encoded
1717 # for backwards compat, name was partially encoded
1718 ofp = self.sopener(store.decodedir(name), 'w')
1718 ofp = self.sopener(store.decodedir(name), 'w')
1719 for chunk in util.filechunkiter(fp, limit=size):
1719 for chunk in util.filechunkiter(fp, limit=size):
1720 handled_bytes += len(chunk)
1720 handled_bytes += len(chunk)
1721 self.ui.progress(_('clone'), handled_bytes,
1721 self.ui.progress(_('clone'), handled_bytes,
1722 total=total_bytes)
1722 total=total_bytes)
1723 ofp.write(chunk)
1723 ofp.write(chunk)
1724 ofp.close()
1724 ofp.close()
1725 tr.close()
1725 tr.close()
1726 finally:
1726 finally:
1727 tr.release()
1727 tr.release()
1728
1728
1729 # Writing straight to files circumvented the inmemory caches
1729 # Writing straight to files circumvented the inmemory caches
1730 self.invalidate()
1730 self.invalidate()
1731
1731
1732 elapsed = time.time() - start
1732 elapsed = time.time() - start
1733 if elapsed <= 0:
1733 if elapsed <= 0:
1734 elapsed = 0.001
1734 elapsed = 0.001
1735 self.ui.progress(_('clone'), None)
1735 self.ui.progress(_('clone'), None)
1736 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1736 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1737 (util.bytecount(total_bytes), elapsed,
1737 (util.bytecount(total_bytes), elapsed,
1738 util.bytecount(total_bytes / elapsed)))
1738 util.bytecount(total_bytes / elapsed)))
1739
1739
1740 # new requirements = old non-format requirements +
1740 # new requirements = old non-format requirements +
1741 # new format-related
1741 # new format-related
1742 # requirements from the streamed-in repository
1742 # requirements from the streamed-in repository
1743 requirements.update(set(self.requirements) - self.supportedformats)
1743 requirements.update(set(self.requirements) - self.supportedformats)
1744 self._applyrequirements(requirements)
1744 self._applyrequirements(requirements)
1745 self._writerequirements()
1745 self._writerequirements()
1746
1746
1747 if rbranchmap:
1747 if rbranchmap:
1748 rbheads = []
1748 rbheads = []
1749 for bheads in rbranchmap.itervalues():
1749 for bheads in rbranchmap.itervalues():
1750 rbheads.extend(bheads)
1750 rbheads.extend(bheads)
1751
1751
1752 if rbheads:
1752 if rbheads:
1753 rtiprev = max((int(self.changelog.rev(node))
1753 rtiprev = max((int(self.changelog.rev(node))
1754 for node in rbheads))
1754 for node in rbheads))
1755 cache = branchmap.branchcache(rbranchmap,
1755 cache = branchmap.branchcache(rbranchmap,
1756 self[rtiprev].node(),
1756 self[rtiprev].node(),
1757 rtiprev)
1757 rtiprev)
1758 # Try to stick it as low as possible
1758 # Try to stick it as low as possible
1759 # filter above served are unlikely to be fetch from a clone
1759 # filter above served are unlikely to be fetch from a clone
1760 for candidate in ('base', 'immutable', 'served'):
1760 for candidate in ('base', 'immutable', 'served'):
1761 rview = self.filtered(candidate)
1761 rview = self.filtered(candidate)
1762 if cache.validfor(rview):
1762 if cache.validfor(rview):
1763 self._branchcaches[candidate] = cache
1763 self._branchcaches[candidate] = cache
1764 cache.write(rview)
1764 cache.write(rview)
1765 break
1765 break
1766 self.invalidate()
1766 self.invalidate()
1767 return len(self.heads()) + 1
1767 return len(self.heads()) + 1
1768 finally:
1768 finally:
1769 lock.release()
1769 lock.release()
1770
1770
1771 def clone(self, remote, heads=[], stream=False):
1771 def clone(self, remote, heads=[], stream=False):
1772 '''clone remote repository.
1772 '''clone remote repository.
1773
1773
1774 keyword arguments:
1774 keyword arguments:
1775 heads: list of revs to clone (forces use of pull)
1775 heads: list of revs to clone (forces use of pull)
1776 stream: use streaming clone if possible'''
1776 stream: use streaming clone if possible'''
1777
1777
1778 # now, all clients that can request uncompressed clones can
1778 # now, all clients that can request uncompressed clones can
1779 # read repo formats supported by all servers that can serve
1779 # read repo formats supported by all servers that can serve
1780 # them.
1780 # them.
1781
1781
1782 # if revlog format changes, client will have to check version
1782 # if revlog format changes, client will have to check version
1783 # and format flags on "stream" capability, and use
1783 # and format flags on "stream" capability, and use
1784 # uncompressed only if compatible.
1784 # uncompressed only if compatible.
1785
1785
1786 if not stream:
1786 if not stream:
1787 # if the server explicitly prefers to stream (for fast LANs)
1787 # if the server explicitly prefers to stream (for fast LANs)
1788 stream = remote.capable('stream-preferred')
1788 stream = remote.capable('stream-preferred')
1789
1789
1790 if stream and not heads:
1790 if stream and not heads:
1791 # 'stream' means remote revlog format is revlogv1 only
1791 # 'stream' means remote revlog format is revlogv1 only
1792 if remote.capable('stream'):
1792 if remote.capable('stream'):
1793 return self.stream_in(remote, set(('revlogv1',)))
1793 return self.stream_in(remote, set(('revlogv1',)))
1794 # otherwise, 'streamreqs' contains the remote revlog format
1794 # otherwise, 'streamreqs' contains the remote revlog format
1795 streamreqs = remote.capable('streamreqs')
1795 streamreqs = remote.capable('streamreqs')
1796 if streamreqs:
1796 if streamreqs:
1797 streamreqs = set(streamreqs.split(','))
1797 streamreqs = set(streamreqs.split(','))
1798 # if we support it, stream in and adjust our requirements
1798 # if we support it, stream in and adjust our requirements
1799 if not streamreqs - self.supportedformats:
1799 if not streamreqs - self.supportedformats:
1800 return self.stream_in(remote, streamreqs)
1800 return self.stream_in(remote, streamreqs)
1801 return self.pull(remote, heads)
1801 return self.pull(remote, heads)
1802
1802
1803 def pushkey(self, namespace, key, old, new):
1803 def pushkey(self, namespace, key, old, new):
1804 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1804 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1805 old=old, new=new)
1805 old=old, new=new)
1806 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1806 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1807 ret = pushkey.push(self, namespace, key, old, new)
1807 ret = pushkey.push(self, namespace, key, old, new)
1808 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1808 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1809 ret=ret)
1809 ret=ret)
1810 return ret
1810 return ret
1811
1811
1812 def listkeys(self, namespace):
1812 def listkeys(self, namespace):
1813 self.hook('prelistkeys', throw=True, namespace=namespace)
1813 self.hook('prelistkeys', throw=True, namespace=namespace)
1814 self.ui.debug('listing keys for "%s"\n' % namespace)
1814 self.ui.debug('listing keys for "%s"\n' % namespace)
1815 values = pushkey.list(self, namespace)
1815 values = pushkey.list(self, namespace)
1816 self.hook('listkeys', namespace=namespace, values=values)
1816 self.hook('listkeys', namespace=namespace, values=values)
1817 return values
1817 return values
1818
1818
1819 def debugwireargs(self, one, two, three=None, four=None, five=None):
1819 def debugwireargs(self, one, two, three=None, four=None, five=None):
1820 '''used to test argument passing over the wire'''
1820 '''used to test argument passing over the wire'''
1821 return "%s %s %s %s %s" % (one, two, three, four, five)
1821 return "%s %s %s %s %s" % (one, two, three, four, five)
1822
1822
1823 def savecommitmessage(self, text):
1823 def savecommitmessage(self, text):
1824 fp = self.opener('last-message.txt', 'wb')
1824 fp = self.opener('last-message.txt', 'wb')
1825 try:
1825 try:
1826 fp.write(text)
1826 fp.write(text)
1827 finally:
1827 finally:
1828 fp.close()
1828 fp.close()
1829 return self.pathto(fp.name[len(self.root) + 1:])
1829 return self.pathto(fp.name[len(self.root) + 1:])
1830
1830
1831 # used to avoid circular references so destructors work
1831 # used to avoid circular references so destructors work
1832 def aftertrans(files):
1832 def aftertrans(files):
1833 renamefiles = [tuple(t) for t in files]
1833 renamefiles = [tuple(t) for t in files]
1834 def a():
1834 def a():
1835 for vfs, src, dest in renamefiles:
1835 for vfs, src, dest in renamefiles:
1836 try:
1836 try:
1837 vfs.rename(src, dest)
1837 vfs.rename(src, dest)
1838 except OSError: # journal file does not yet exist
1838 except OSError: # journal file does not yet exist
1839 pass
1839 pass
1840 return a
1840 return a
1841
1841
1842 def undoname(fn):
1842 def undoname(fn):
1843 base, name = os.path.split(fn)
1843 base, name = os.path.split(fn)
1844 assert name.startswith('journal')
1844 assert name.startswith('journal')
1845 return os.path.join(base, name.replace('journal', 'undo', 1))
1845 return os.path.join(base, name.replace('journal', 'undo', 1))
1846
1846
1847 def instance(ui, path, create):
1847 def instance(ui, path, create):
1848 return localrepository(ui, util.urllocalpath(path), create)
1848 return localrepository(ui, util.urllocalpath(path), create)
1849
1849
1850 def islocal(path):
1850 def islocal(path):
1851 return True
1851 return True
General Comments 0
You need to be logged in to leave comments. Login now