localrepo: rename revlog.maxchainlen to format.maxchainlen...
Augie Fackler
r23256:1c11393d default
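
Note: with this change the delta-chain limit is read from the [format] section of hgrc rather than [revlog]; the old revlog.maxchainlen spelling is no longer consulted by this code path. A minimal hgrc sketch (the value shown is an arbitrary example, not taken from this commit):

[format]
maxchainlen = 1000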
@@ -1,1806 +1,1806 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize
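        # maxchainlen caps how long a revlog delta chain may grow before a
        # full snapshot is stored; like chunkcachesize above, the value is
        # handed to revlogs through the store opener's options dict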
-        maxchainlen = self.ui.configint('revlog', 'maxchainlen')
+        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.sopener.options['maxchainlen'] = maxchainlen

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.sopener, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

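    # for each node, report whether this repo has it and it is not secret;
    # peer discovery uses this to find common changesets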
    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f, *insidef):
        return os.path.join(self.path, f, *insidef)

    def wjoin(self, f, *insidef):
        return os.path.join(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

868 def transaction(self, desc, report=None):
868 def transaction(self, desc, report=None):
869 tr = self._transref and self._transref() or None
869 tr = self._transref and self._transref() or None
870 if tr and tr.running():
870 if tr and tr.running():
871 return tr.nest()
871 return tr.nest()
872
872
873 # abort here if the journal already exists
873 # abort here if the journal already exists
874 if self.svfs.exists("journal"):
874 if self.svfs.exists("journal"):
875 raise error.RepoError(
875 raise error.RepoError(
876 _("abandoned transaction found"),
876 _("abandoned transaction found"),
877 hint=_("run 'hg recover' to clean up transaction"))
877 hint=_("run 'hg recover' to clean up transaction"))
878
878
879 def onclose():
879 def onclose():
880 self.store.write(self._transref())
880 self.store.write(self._transref())
881
881
882 self._writejournal(desc)
882 self._writejournal(desc)
883 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
883 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
884 rp = report and report or self.ui.warn
884 rp = report and report or self.ui.warn
885 tr = transaction.transaction(rp, self.sopener,
885 tr = transaction.transaction(rp, self.sopener,
886 "journal",
886 "journal",
887 aftertrans(renames),
887 aftertrans(renames),
888 self.store.createmode,
888 self.store.createmode,
889 onclose)
889 onclose)
890 self._transref = weakref.ref(tr)
890 self._transref = weakref.ref(tr)
891 return tr
891 return tr
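
    # Example usage (illustrative, mirroring the pattern used elsewhere in
    # this file, e.g. in commitctx below): always pair close() with
    # release() so an exception rolls the journal back instead of leaving
    # it abandoned:
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         ...            # write store data under the transaction
    #         tr.close()     # commit the journalled changes
    #     finally:
    #         tr.release()   # no-op after close(); rollback otherwise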

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()
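
    # Hypothetical extension sketch (the names `_invalidateall` and
    # `myextensioncache` are assumptions, not from this file) showing how
    # an extension might hook invalidation, per the comment above:
    #
    #     from mercurial import extensions, localrepo
    #
    #     def _invalidateall(orig, repo):
    #         myextensioncache.clear()    # hypothetical private cache
    #         return orig(repo)
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(localrepo.localrepository,
    #                                 'invalidateall', _invalidateall)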

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed when the lock is released; if no
        lock is currently held, it is executed immediately."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
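
    # Illustrative usage (the callback name is hypothetical): commit()
    # below uses this very mechanism to fire the 'commit' hook only after
    # the store lock is released:
    #
    #     def notify():
    #         repo.ui.status('lock released, running deferred work\n')
    #     repo._afterlock(notify)
    #
    # If a lock is held, notify() runs at release time; otherwise it runs
    # right away.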

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
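
    # Lock-ordering sketch (mirrors rollback() above): when both locks are
    # needed, take wlock() before lock() and release in reverse order to
    # avoid deadlocks between concurrent hg processes:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...                 # mutate store and working directory
    #     finally:
    #         release(lock, wlock)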

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, status)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before
            # the hook runs
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
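
    # Illustrative call (all arguments shown exist in the signature above);
    # returns the new changeset node, or None when there is nothing to
    # commit on an unchanged branch:
    #
    #     node = repo.commit(text='fix frobnication',
    #                        user='Jane Doe <jane@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')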

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0, we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
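
    # Illustrative query (arguments match the signature above): list the
    # open heads of the 'default' branch, newest first, or include heads
    # whose branch was explicitly closed:
    #
    #     open_heads = repo.branchheads('default')
    #     all_heads = repo.branchheads('default', closed=True)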

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
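
    # Note (added for clarity; not in the original file): for each
    # (top, bottom) pair, the loop above samples first-parent ancestors at
    # exponentially growing distances below `top` -- 1, 2, 4, 8, ... steps
    # -- which keeps the answer to the legacy 'between' wire command
    # logarithmic in the length of the chain. For a linear history:
    #
    #     repo.between([(tip, root)])
    #     # -> [[tip~1, tip~2, tip~4, tip~8, ...]]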

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret
1757
1757
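The negotiation above tries three capabilities in order: an explicit server preference ('stream-preferred'), the legacy 'stream' capability (which implies a plain revlogv1 store), and finally 'streamreqs', which lists the server's on-disk requirements so the client can refuse formats it cannot read. A rough standalone sketch of that decision, using the hypothetical name negotiate_streamreqs (the real method also performs the copy via stream_in and always finishes with exchange.pull):

    def negotiate_streamreqs(remote, heads, supportedformats, stream=False):
        # Returns the requirement set to stream with, or None to fall back
        # to a regular pull. Mirrors the checks in clone() above.
        if not stream:
            stream = remote.capable('stream-preferred')
        if not stream or heads:
            return None                      # partial clones must use pull
        if remote.capable('stream'):
            return set(('revlogv1',))        # legacy capability: revlogv1 only
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            reqs = set(streamreqs.split(','))
            if not reqs - supportedformats:  # we understand every requirement
                return reqs
        return None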
1758 def pushkey(self, namespace, key, old, new):
1759 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1760 old=old, new=new)
1761 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1762 ret = pushkey.push(self, namespace, key, old, new)
1763 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1764 ret=ret)
1765 return ret
1766
1767 def listkeys(self, namespace):
1768 self.hook('prelistkeys', throw=True, namespace=namespace)
1769 self.ui.debug('listing keys for "%s"\n' % namespace)
1770 values = pushkey.list(self, namespace)
1771 self.hook('listkeys', namespace=namespace, values=values)
1772 return values
1773
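Both methods wrap the generic pushkey protocol, which moves simple string key/value namespaces (bookmarks, phases, obsolescence markers) over the wire, firing pre- and post-hooks around each operation. A minimal sketch of driving them from an extension, assuming repo is an open localrepository and that a 'stable' bookmark does not exist yet (old='' requests creation):

    # Enumerate the built-in 'bookmarks' namespace: {name: hex node, ...}
    marks = repo.listkeys('bookmarks')
    for name, node in sorted(marks.items()):
        repo.ui.write('%s -> %s\n' % (name, node))

    # Publish a new bookmark pointing at tip; a truthy return means the
    # prepushkey hook allowed it and the update was applied.
    ok = repo.pushkey('bookmarks', 'stable', '', repo['tip'].hex())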
1774 def debugwireargs(self, one, two, three=None, four=None, five=None):
1775 '''used to test argument passing over the wire'''
1776 return "%s %s %s %s %s" % (one, two, three, four, five)
1777
1778 def savecommitmessage(self, text):
1779 fp = self.opener('last-message.txt', 'wb')
1780 try:
1781 fp.write(text)
1782 finally:
1783 fp.close()
1784 return self.pathto(fp.name[len(self.root) + 1:])
1785
1786 # used to avoid circular references so destructors work
1787 def aftertrans(files):
1788 renamefiles = [tuple(t) for t in files]
1789 def a():
1790 for vfs, src, dest in renamefiles:
1791 try:
1792 vfs.rename(src, dest)
1793 except OSError: # journal file does not yet exist
1794 pass
1795 return a
1796
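aftertrans is a module-level function rather than a repo method on purpose: the returned closure captures only plain (vfs, src, dest) tuples, so handing it to a transaction as a post-close callback never creates a repo -> transaction -> repo reference cycle that would defeat the destructors. A toy usage sketch, assuming vfs is a store opener with a rename() method:

    # Rename journal files to undo files once the transaction is closed.
    files = [(vfs, 'journal', 'undo'),
             (vfs, 'journal.phaseroots', 'undo.phaseroots')]
    onclose = aftertrans(files)
    # ... transaction commits successfully ...
    onclose()   # performs the renames; a missing journal is silently skipped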
1797 def undoname(fn):
1798 base, name = os.path.split(fn)
1799 assert name.startswith('journal')
1800 return os.path.join(base, name.replace('journal', 'undo', 1))
1801
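undoname simply maps a transaction journal path to the corresponding undo path, replacing only the first 'journal' in the basename; for example:

    >>> undoname('.hg/store/journal')
    '.hg/store/undo'
    >>> undoname('.hg/store/journal.phaseroots')
    '.hg/store/undo.phaseroots'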
1802 def instance(ui, path, create):
1803 return localrepository(ui, util.urllocalpath(path), create)
1804
1805 def islocal(path):
1806 return True
@@ -1,81 +1,81 b''
1 $ hg init debugrevlog
2 $ cd debugrevlog
3 $ echo a > a
4 $ hg ci -Am adda
5 adding a
6 $ hg debugrevlog -m
7 format : 1
8 flags : inline
9
10 revisions : 1
11 merges : 0 ( 0.00%)
12 normal : 1 (100.00%)
13 revisions : 1
14 full : 1 (100.00%)
15 deltas : 0 ( 0.00%)
16 revision size : 44
17 full : 44 (100.00%)
18 deltas : 0 ( 0.00%)
19
20 avg chain length : 0
21 compression ratio : 0
22
23 uncompressed data size (min/max/avg) : 43 / 43 / 43
24 full revision size (min/max/avg) : 44 / 44 / 44
25 delta size (min/max/avg) : 0 / 0 / 0
26
27 Test max chain len
28 $ cat >> $HGRCPATH << EOF
29 > [revlog]
29 > [format]
30 > maxchainlen=4
31 > EOF
32
33 $ echo "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
33 $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
34 $ hg ci -m a
35 $ echo "b\n" >> a
35 $ printf "b\n" >> a
36 $ hg ci -m a
37 $ echo "c\n" >> a
37 $ printf "c\n" >> a
38 $ hg ci -m a
39 $ echo "d\n" >> a
39 $ printf "d\n" >> a
40 $ hg ci -m a
41 $ echo "e\n" >> a
41 $ printf "e\n" >> a
42 $ hg ci -m a
43 $ echo "f\n" >> a
43 $ printf "f\n" >> a
44 $ hg ci -m a
45 $ echo 'g\n' >> a
45 $ printf 'g\n' >> a
46 $ hg ci -m a
47 $ echo 'h\n' >> a
47 $ printf 'h\n' >> a
48 $ hg ci -m a
49 $ hg debugrevlog -d a
50 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
51 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
52 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
53 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
54 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
55 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
56 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
57 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
58 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
59 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
60 $ cd ..
61
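The chainlen column above shows the renamed format.maxchainlen option at work: revisions 1 through 4 are deltas with chain lengths 1 to 4, and revision 5, which would have pushed the chain to 5, is stored as a full snapshot instead (chainlen back to 0). A sketch of that decision with hypothetical names (the real logic lives in revlog's delta-selection code):

    def choose_storage(parentchainlen, maxchainlen=4):
        # Store a full revision once a delta would exceed the cap, so a
        # read never has to replay more than maxchainlen deltas.
        if maxchainlen is not None and parentchainlen + 1 > maxchainlen:
            return 'full', 0               # snapshot; chain restarts
        return 'delta', parentchainlen + 1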
62 Test internal debugstacktrace command
63
64 $ cat > debugstacktrace.py << EOF
65 > from mercurial.util import debugstacktrace, dst, sys
66 > def f():
67 > dst('hello world')
68 > def g():
69 > f()
70 > debugstacktrace(skip=-5, f=sys.stdout)
71 > g()
72 > EOF
73 $ python debugstacktrace.py
74 hello world at:
75 debugstacktrace.py:7 in * (glob)
76 debugstacktrace.py:5 in g
77 debugstacktrace.py:3 in f
78 stacktrace at:
79 debugstacktrace.py:7 *in * (glob)
80 debugstacktrace.py:6 *in g (glob)
81 */util.py:* in debugstacktrace (glob)