transaction: use the location value when doing backup...
Pierre-Yves David
r23311:64ab33ff default
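
This change makes the transaction code honour the location recorded with each
backup entry, so backup files are created and restored through the vfs that
matches that location rather than always through the store. In the
localrepo.py hunk below, recover() now builds a vfs map and hands it to
transaction.rollback(): '' names the store vfs and 'plain' names the vfs
rooted at .hg/, complementing the {'plain': self.opener} map that
transaction() already passes when opening a transaction. A minimal sketch of
the lookup convention, assuming a (location, path) pair read from a journal
entry (resolve() is illustrative, not the actual transaction internals):

    def resolve(vfsmap, location, path):
        # '' -> store vfs; 'plain' -> vfs rooted at .hg/
        vfs = vfsmap[location]
        return vfs.join(path)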
@@ -1,1807 +1,1810 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.sopener.options['maxchainlen'] = maxchainlen

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.sopener, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
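
    # A minimal usage sketch for the two revset entry points above, assuming
    # an existing repo object (formatspec() quotes the extra arguments):
    #
    #   for rev in repo.revs('branch(%s) and not public()', 'default'):
    #       ...
    #   for ctx in repo.set('heads(branch(%s))', 'default'):
    #       ...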

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
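
    # A minimal caller sketch for tag(), assuming an existing repo object and
    # a node taken from its changelog; argument order follows the signature
    # above (date=None lets commit() pick the current date):
    #
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', False,
    #            'Alice <alice@example.com>', None)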

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f, *insidef):
        return os.path.join(self.path, f, *insidef)

    def wjoin(self, f, *insidef):
        return os.path.join(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        vfsmap = {'plain': self.opener} # root of .hg/
        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr
893
893
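    # Usage sketch for transaction() (illustrative; it mirrors the callers
    # in commitctx() and stream_in() below):
    #
    #     tr = repo.transaction("my-operation")
    #     try:
    #         ...            # write data through tr
    #         tr.close()     # make the changes permanent
    #     finally:
    #         tr.release()   # a no-op after close(), an abort otherwise
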
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

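    # Every journal.* file written above has an undo.* counterpart:
    # aftertrans() renames journal.* to undo.* once the transaction closes
    # (e.g. journal.dirstate -> undo.dirstate, see undoname() at the end of
    # this module), which is what _rollback() reads later.
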
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
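                # map the transaction's location markers to vfs objects:
                # '' is the store (svfs) and 'plain' is .hg/ itself, so
                # journal and backup entries recorded against either
                # location are restored from the right directory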
                vfsmap = {'': self.sopener,
                          'plain': self.opener,}
                transaction.rollback(self.sopener, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
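        # the store itself is addressed through self.sopener; the vfsmap
        # only needs to resolve the 'plain' (.hg/) location for entries
        # recorded outside the store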
        vfsmap = {'plain': self.opener}
        transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

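    # Lock-ordering note: code that needs both locks takes wlock() before
    # lock() (see rollback() above); acquiring them in the opposite order
    # risks deadlocks between concurrent processes.
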
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

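    # The meta dict built in _filecommit() ends up in the filelog itself:
    # flog.add() stores the 'copy'/'copyrev' keys in the file revision's
    # metadata header, which is how renames and copies survive in history.
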
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, status)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook is released
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

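    # Hook sequence for a successful commit: 'precommit' fires before
    # anything is written, 'pretxncommit' (in commitctx() below) can still
    # veto the open transaction, and 'commit' runs after the repository
    # lock is released (or immediately if none is held), via the
    # _afterlock() callback above.
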
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
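            # writepending() flushes the delayed changelog entry to a
            # separate pending file so that the pretxncommit hook below
            # can see the new revision before the transaction is closed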
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration, this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

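    # between() walks the first-parent chain from top towards bottom and
    # keeps the nodes at exponentially growing distances (1, 2, 4, ...);
    # the doubling of f above is what keeps the wire protocol's 'between'
    # exchange logarithmic in the length of the chain.
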
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible;
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

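    # The stream format parsed above is: one integer status line, one
    # "<filecount> <bytecount>" line, then for each file a "<name>\0<size>"
    # header followed by exactly <size> bytes of raw store data written
    # straight into place.
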
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

1759 def pushkey(self, namespace, key, old, new):
1762 def pushkey(self, namespace, key, old, new):
1760 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1763 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1761 old=old, new=new)
1764 old=old, new=new)
1762 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1765 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1763 ret = pushkey.push(self, namespace, key, old, new)
1766 ret = pushkey.push(self, namespace, key, old, new)
1764 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1767 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1765 ret=ret)
1768 ret=ret)
1766 return ret
1769 return ret
1767
1770
1768 def listkeys(self, namespace):
1771 def listkeys(self, namespace):
1769 self.hook('prelistkeys', throw=True, namespace=namespace)
1772 self.hook('prelistkeys', throw=True, namespace=namespace)
1770 self.ui.debug('listing keys for "%s"\n' % namespace)
1773 self.ui.debug('listing keys for "%s"\n' % namespace)
1771 values = pushkey.list(self, namespace)
1774 values = pushkey.list(self, namespace)
1772 self.hook('listkeys', namespace=namespace, values=values)
1775 self.hook('listkeys', namespace=namespace, values=values)
1773 return values
1776 return values
1774
1777
1775 def debugwireargs(self, one, two, three=None, four=None, five=None):
1778 def debugwireargs(self, one, two, three=None, four=None, five=None):
1776 '''used to test argument passing over the wire'''
1779 '''used to test argument passing over the wire'''
1777 return "%s %s %s %s %s" % (one, two, three, four, five)
1780 return "%s %s %s %s %s" % (one, two, three, four, five)
1778
1781
1779 def savecommitmessage(self, text):
1782 def savecommitmessage(self, text):
1780 fp = self.opener('last-message.txt', 'wb')
1783 fp = self.opener('last-message.txt', 'wb')
1781 try:
1784 try:
1782 fp.write(text)
1785 fp.write(text)
1783 finally:
1786 finally:
1784 fp.close()
1787 fp.close()
1785 return self.pathto(fp.name[len(self.root) + 1:])
1788 return self.pathto(fp.name[len(self.root) + 1:])
1786
1789
1787 # used to avoid circular references so destructors work
1790 # used to avoid circular references so destructors work
1788 def aftertrans(files):
1791 def aftertrans(files):
1789 renamefiles = [tuple(t) for t in files]
1792 renamefiles = [tuple(t) for t in files]
1790 def a():
1793 def a():
1791 for vfs, src, dest in renamefiles:
1794 for vfs, src, dest in renamefiles:
1792 try:
1795 try:
1793 vfs.rename(src, dest)
1796 vfs.rename(src, dest)
1794 except OSError: # journal file does not yet exist
1797 except OSError: # journal file does not yet exist
1795 pass
1798 pass
1796 return a
1799 return a
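# illustrative: aftertrans([(vfs, 'journal', 'undo')]) returns a callback
# that renames the journal to 'undo' once the transaction is committed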
1797
1800
1798 def undoname(fn):
1801 def undoname(fn):
1799 base, name = os.path.split(fn)
1802 base, name = os.path.split(fn)
1800 assert name.startswith('journal')
1803 assert name.startswith('journal')
1801 return os.path.join(base, name.replace('journal', 'undo', 1))
1804 return os.path.join(base, name.replace('journal', 'undo', 1))
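# illustrative: undoname('.hg/store/journal') -> '.hg/store/undo'
# and undoname('journal.backupfiles') -> 'undo.backupfiles'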
1802
1805
1803 def instance(ui, path, create):
1806 def instance(ui, path, create):
1804 return localrepository(ui, util.urllocalpath(path), create)
1807 return localrepository(ui, util.urllocalpath(path), create)
1805
1808
1806 def islocal(path):
1809 def islocal(path):
1807 return True
1810 return True
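To make the capability negotiation in clone() above easier to follow, here is a minimal sketch of the same decision flow, assuming only the remote.capable() interface used in the code; the helper name decide_stream is hypothetical:

def decide_stream(remote, heads, stream, supportedformats):
    # honor an explicit server preference for streaming (fast LANs)
    if not stream:
        stream = remote.capable('stream-preferred')
    if not stream or heads:
        # specific heads were requested, or streaming is off: plain pull
        return None
    if remote.capable('stream'):
        # legacy capability: the remote revlog format is revlogv1 only
        return set(('revlogv1',))
    streamreqs = remote.capable('streamreqs')
    if streamreqs:
        # 'streamreqs' advertises the remote requirements, comma-separated
        streamreqs = set(streamreqs.split(','))
        # stream in only if we support every remote requirement
        if not streamreqs - supportedformats:
            return streamreqs
    return None

A non-None return would mean stream_in() can be used with the given requirement set; None falls through to the regular exchange.pull() path.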
@@ -1,462 +1,468
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
18 version = 0
18 version = 0
19
19
20 def active(func):
20 def active(func):
21 def _active(self, *args, **kwds):
21 def _active(self, *args, **kwds):
22 if self.count == 0:
22 if self.count == 0:
23 raise error.Abort(_(
23 raise error.Abort(_(
24 'cannot use transaction when it is already committed/aborted'))
24 'cannot use transaction when it is already committed/aborted'))
25 return func(self, *args, **kwds)
25 return func(self, *args, **kwds)
26 return _active
26 return _active
27
27
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
28 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 unlink=True):
29 for f, o, _ignore in entries:
30 for f, o, _ignore in entries:
30 if o or not unlink:
31 if o or not unlink:
31 try:
32 try:
32 fp = opener(f, 'a')
33 fp = opener(f, 'a')
33 fp.truncate(o)
34 fp.truncate(o)
34 fp.close()
35 fp.close()
35 except IOError:
36 except IOError:
36 report(_("failed to truncate %s\n") % f)
37 report(_("failed to truncate %s\n") % f)
37 raise
38 raise
38 else:
39 else:
39 try:
40 try:
40 opener.unlink(f)
41 opener.unlink(f)
41 except (IOError, OSError), inst:
42 except (IOError, OSError), inst:
42 if inst.errno != errno.ENOENT:
43 if inst.errno != errno.ENOENT:
43 raise
44 raise
44
45
45 backupfiles = []
46 backupfiles = []
46 for l, f, b, c in backupentries:
47 for l, f, b, c in backupentries:
48 vfs = vfsmap[l]
47 if f and b:
49 if f and b:
48 filepath = opener.join(f)
50 filepath = vfs.join(f)
49 backuppath = opener.join(b)
51 backuppath = vfs.join(b)
50 try:
52 try:
51 util.copyfile(backuppath, filepath)
53 util.copyfile(backuppath, filepath)
52 backupfiles.append(b)
54 backupfiles.append(b)
53 except IOError:
55 except IOError:
54 report(_("failed to recover %s\n") % f)
56 report(_("failed to recover %s\n") % f)
55 raise
57 raise
56 else:
58 else:
57 target = f or b
59 target = f or b
58 try:
60 try:
59 opener.unlink(target)
61 vfs.unlink(target)
60 except (IOError, OSError), inst:
62 except (IOError, OSError), inst:
61 if inst.errno != errno.ENOENT:
63 if inst.errno != errno.ENOENT:
62 raise
64 raise
63
65
64 opener.unlink(journal)
66 opener.unlink(journal)
65 backuppath = "%s.backupfiles" % journal
67 backuppath = "%s.backupfiles" % journal
66 if opener.exists(backuppath):
68 if opener.exists(backuppath):
67 opener.unlink(backuppath)
69 opener.unlink(backuppath)
68 for f in backupfiles:
70 for f in backupfiles:
69 if opener.exists(f):
71 if opener.exists(f):
70 opener.unlink(f)
72 opener.unlink(f)
71
73
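# In short, _playback() above truncates each journaled file back to its
# recorded offset (or unlinks it), restores each backed-up file through
# the vfs named by its 'location' field, and finally removes the journal,
# the backup journal, and any leftover backup files.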
72 class transaction(object):
74 class transaction(object):
73 def __init__(self, report, opener, vfsmap, journal, after=None,
75 def __init__(self, report, opener, vfsmap, journal, after=None,
74 createmode=None, onclose=None, onabort=None):
76 createmode=None, onclose=None, onabort=None):
75 """Begin a new transaction
77 """Begin a new transaction
76
78
77 Begins a new transaction that allows rolling back writes in the event of
79 Begins a new transaction that allows rolling back writes in the event of
78 an exception.
80 an exception.
79
81
80 * `after`: called after the transaction has been committed
82 * `after`: called after the transaction has been committed
81 * `createmode`: the mode of the journal file that will be created
83 * `createmode`: the mode of the journal file that will be created
82 * `onclose`: called as the transaction is closing, but before it is
84 * `onclose`: called as the transaction is closing, but before it is
83 closed
85 closed
84 * `onabort`: called as the transaction is aborting, but before any files
86 * `onabort`: called as the transaction is aborting, but before any files
85 have been truncated
87 have been truncated
86 """
88 """
87 self.count = 1
89 self.count = 1
88 self.usages = 1
90 self.usages = 1
89 self.report = report
91 self.report = report
90 # a vfs to the store content
92 # a vfs to the store content
91 self.opener = opener
93 self.opener = opener
92 # a map to access files in various locations: {location -> vfs}
94 # a map to access files in various locations: {location -> vfs}
93 vfsmap = vfsmap.copy()
95 vfsmap = vfsmap.copy()
94 vfsmap[''] = opener # set default value
96 vfsmap[''] = opener # set default value
95 self._vfsmap = vfsmap
97 self._vfsmap = vfsmap
96 self.after = after
98 self.after = after
97 self.onclose = onclose
99 self.onclose = onclose
98 self.onabort = onabort
100 self.onabort = onabort
99 self.entries = []
101 self.entries = []
100 self.map = {}
102 self.map = {}
101 self.journal = journal
103 self.journal = journal
102 self._queue = []
104 self._queue = []
103 # a dict of arguments to be passed to hooks
105 # a dict of arguments to be passed to hooks
104 self.hookargs = {}
106 self.hookargs = {}
105 self.file = opener.open(self.journal, "w")
107 self.file = opener.open(self.journal, "w")
106
108
107 # a list of ('location', 'path', 'backuppath', cache) entries.
109 # a list of ('location', 'path', 'backuppath', cache) entries.
108 # if 'backuppath' is empty, no file existed at backup time
110 # - if 'backuppath' is empty, no file existed at backup time
109 # if 'path' is empty, this is a temporary transaction file
111 # - if 'path' is empty, this is a temporary transaction file
110 # (location and cache are currently unused)
112 # - if 'location' is not empty, the path is outside the main opener's reach;
113 #   use the 'location' value as a key into the vfs map to find the right vfs
114 # (cache is currently unused)
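# illustration with hypothetical paths: a backed-up store file yields
# ('', 'data/foo.i', 'journal.backup.data/foo.i', False), while a
# temporary file registered via registertmp() yields
# ('', '', 'journal.pending.bookmarks', False)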
111 self._backupentries = []
115 self._backupentries = []
112 self._backupmap = {}
116 self._backupmap = {}
113 self._backupjournal = "%s.backupfiles" % journal
117 self._backupjournal = "%s.backupfiles" % journal
114 self._backupsfile = opener.open(self._backupjournal, 'w')
118 self._backupsfile = opener.open(self._backupjournal, 'w')
115 self._backupsfile.write('%d\n' % version)
119 self._backupsfile.write('%d\n' % version)
116
120
117 if createmode is not None:
121 if createmode is not None:
118 opener.chmod(self.journal, createmode & 0666)
122 opener.chmod(self.journal, createmode & 0666)
119 opener.chmod(self._backupjournal, createmode & 0666)
123 opener.chmod(self._backupjournal, createmode & 0666)
120
124
121 # holds file generators to be run on commit
125 # holds file generators to be run on commit
122 self._filegenerators = {}
126 self._filegenerators = {}
123 # holds callbacks to write pending data for hooks
127 # holds callbacks to write pending data for hooks
124 self._pendingcallback = {}
128 self._pendingcallback = {}
125 # True if any pending data has ever been written
129 # True if any pending data has ever been written
126 self._anypending = False
130 self._anypending = False
127 # holds callbacks to call when writing the transaction
131 # holds callbacks to call when writing the transaction
128 self._finalizecallback = {}
132 self._finalizecallback = {}
129 # holds callbacks to run after the transaction is closed
133 # holds callbacks to run after the transaction is closed
130 self._postclosecallback = {}
134 self._postclosecallback = {}
131
135
132 def __del__(self):
136 def __del__(self):
133 if self.journal:
137 if self.journal:
134 self._abort()
138 self._abort()
135
139
136 @active
140 @active
137 def startgroup(self):
141 def startgroup(self):
138 """delay registration of file entry
142 """delay registration of file entry
139
143
140 This is used by strip to delay vision of strip offset. The transaction
144 This is used by strip to delay vision of strip offset. The transaction
141 sees either none or all of the strip actions to be done."""
145 sees either none or all of the strip actions to be done."""
142 self._queue.append([])
146 self._queue.append([])
143
147
144 @active
148 @active
145 def endgroup(self):
149 def endgroup(self):
146 """apply delayed registration of file entry.
150 """apply delayed registration of file entry.
147
151
148 This is used by strip to delay vision of strip offset. The transaction
152 This is used by strip to delay vision of strip offset. The transaction
149 sees either none or all of the strip actions to be done."""
153 sees either none or all of the strip actions to be done."""
150 q = self._queue.pop()
154 q = self._queue.pop()
151 for f, o, data in q:
155 for f, o, data in q:
152 self._addentry(f, o, data)
156 self._addentry(f, o, data)
153
157
154 @active
158 @active
155 def add(self, file, offset, data=None):
159 def add(self, file, offset, data=None):
156 """record the state of an append-only file before update"""
160 """record the state of an append-only file before update"""
157 if file in self.map or file in self._backupmap:
161 if file in self.map or file in self._backupmap:
158 return
162 return
159 if self._queue:
163 if self._queue:
160 self._queue[-1].append((file, offset, data))
164 self._queue[-1].append((file, offset, data))
161 return
165 return
162
166
163 self._addentry(file, offset, data)
167 self._addentry(file, offset, data)
164
168
165 def _addentry(self, file, offset, data):
169 def _addentry(self, file, offset, data):
166 """add a append-only entry to memory and on-disk state"""
170 """add a append-only entry to memory and on-disk state"""
167 if file in self.map or file in self._backupmap:
171 if file in self.map or file in self._backupmap:
168 return
172 return
169 self.entries.append((file, offset, data))
173 self.entries.append((file, offset, data))
170 self.map[file] = len(self.entries) - 1
174 self.map[file] = len(self.entries) - 1
171 # add enough data to the journal to do the truncate
175 # add enough data to the journal to do the truncate
172 self.file.write("%s\0%d\n" % (file, offset))
176 self.file.write("%s\0%d\n" % (file, offset))
173 self.file.flush()
177 self.file.flush()
174
178
175 @active
179 @active
176 def addbackup(self, file, hardlink=True, vfs=None):
180 def addbackup(self, file, hardlink=True, vfs=None):
177 """Adds a backup of the file to the transaction
181 """Adds a backup of the file to the transaction
178
182
179 Calling addbackup() creates a hardlink backup of the specified file
183 Calling addbackup() creates a hardlink backup of the specified file
180 that is used to recover the file in the event of the transaction
184 that is used to recover the file in the event of the transaction
181 aborting.
185 aborting.
182
186
183 * `file`: the file path, relative to .hg/store
187 * `file`: the file path, relative to .hg/store
184 * `hardlink`: use a hardlink to quickly create the backup
188 * `hardlink`: use a hardlink to quickly create the backup
185 """
189 """
186 if self._queue:
190 if self._queue:
187 msg = 'cannot use transaction.addbackup inside "group"'
191 msg = 'cannot use transaction.addbackup inside "group"'
188 raise RuntimeError(msg)
192 raise RuntimeError(msg)
189
193
190 if file in self.map or file in self._backupmap:
194 if file in self.map or file in self._backupmap:
191 return
195 return
192 backupfile = "%s.backup.%s" % (self.journal, file)
196 backupfile = "%s.backup.%s" % (self.journal, file)
193 if vfs is None:
197 if vfs is None:
194 vfs = self.opener
198 vfs = self.opener
195 if vfs.exists(file):
199 if vfs.exists(file):
196 filepath = vfs.join(file)
200 filepath = vfs.join(file)
197 backuppath = self.opener.join(backupfile)
201 backuppath = self.opener.join(backupfile)
198 util.copyfiles(filepath, backuppath, hardlink=hardlink)
202 util.copyfiles(filepath, backuppath, hardlink=hardlink)
199 else:
203 else:
200 backupfile = ''
204 backupfile = ''
201
205
202 self._addbackupentry(('', file, backupfile, False))
206 self._addbackupentry(('', file, backupfile, False))
203
207
204 def _addbackupentry(self, entry):
208 def _addbackupentry(self, entry):
205 """register a new backup entry and write it to disk"""
209 """register a new backup entry and write it to disk"""
206 self._backupentries.append(entry)
210 self._backupentries.append(entry)
207 self._backupmap[entry[1]] = len(self._backupentries) - 1
211 self._backupmap[entry[1]] = len(self._backupentries) - 1
208 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
212 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
209 self._backupsfile.flush()
213 self._backupsfile.flush()
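# on disk each record is one line of four NUL-separated fields
# (location, file, backup file, cache flag); with hypothetical values
# ('', 'data/foo.i', 'journal.backup.data/foo.i', 0) this serializes to
# '\x00data/foo.i\x00journal.backup.data/foo.i\x000\n'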
210
214
211 @active
215 @active
212 def registertmp(self, tmpfile):
216 def registertmp(self, tmpfile):
213 """register a temporary transaction file
217 """register a temporary transaction file
214
218
215 Such a file will be deleted when the transaction exits (on both failure
219 Such a file will be deleted when the transaction exits (on both failure
216 and success).
220 and success).
217 """
221 """
218 self._addbackupentry(('', '', tmpfile, False))
222 self._addbackupentry(('', '', tmpfile, False))
219
223
220 @active
224 @active
221 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
225 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
222 """add a function to generates some files at transaction commit
226 """add a function to generates some files at transaction commit
223
227
224 The `genfunc` argument is a function capable of generating proper
228 The `genfunc` argument is a function capable of generating proper
225 content of each entry in the `filename` tuple.
229 content of each entry in the `filename` tuple.
226
230
227 At transaction close time, `genfunc` will be called with one file
231 At transaction close time, `genfunc` will be called with one file
228 object argument per entries in `filenames`.
232 object argument per entries in `filenames`.
229
233
230 The transaction itself is responsible for the backup, creation and
234 The transaction itself is responsible for the backup, creation and
231 final write of such file.
235 final write of such file.
232
236
233 The `genid` argument is used to ensure the same set of file is only
237 The `genid` argument is used to ensure the same set of file is only
234 generated once. Call to `addfilegenerator` for a `genid` already
238 generated once. Call to `addfilegenerator` for a `genid` already
235 present will overwrite the old entry.
239 present will overwrite the old entry.
236
240
237 The `order` argument may be used to control the order in which multiple
241 The `order` argument may be used to control the order in which multiple
238 generator will be executed.
242 generator will be executed.
239 """
243 """
240 # For now, we cannot do proper backup and restore of files on a custom
244 # For now, we cannot do proper backup and restore of files on a custom
241 # vfs, except for bookmarks, which are handled outside this mechanism.
245 # vfs, except for bookmarks, which are handled outside this mechanism.
242 assert vfs is None or filenames == ('bookmarks',)
246 assert vfs is None or filenames == ('bookmarks',)
243 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
247 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
244
248
245 def _generatefiles(self):
249 def _generatefiles(self):
246 # write files registered for generation
250 # write files registered for generation
247 for entry in sorted(self._filegenerators.values()):
251 for entry in sorted(self._filegenerators.values()):
248 order, filenames, genfunc, vfs = entry
252 order, filenames, genfunc, vfs = entry
249 if vfs is None:
253 if vfs is None:
250 vfs = self.opener
254 vfs = self.opener
251 files = []
255 files = []
252 try:
256 try:
253 for name in filenames:
257 for name in filenames:
254 # Some files are already backed up when creating the
258 # Some files are already backed up when creating the
255 # localrepo. Until this is properly fixed we disable the
259 # localrepo. Until this is properly fixed we disable the
256 # backup for them.
260 # backup for them.
257 if name not in ('phaseroots', 'bookmarks'):
261 if name not in ('phaseroots', 'bookmarks'):
258 self.addbackup(name)
262 self.addbackup(name)
259 files.append(vfs(name, 'w', atomictemp=True))
263 files.append(vfs(name, 'w', atomictemp=True))
260 genfunc(*files)
264 genfunc(*files)
261 finally:
265 finally:
262 for f in files:
266 for f in files:
263 f.close()
267 f.close()
264
268
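# usage sketch (hypothetical generator writing a derived cache file):
#
#     def writefoo(fp):
#         fp.write('derived state\n')
#
#     tr.addfilegenerator('foo', ('foo',), writefoo)
#
# at close time the transaction backs up 'foo' if needed, opens it with
# atomictemp=True, calls writefoo(fp), and closes it -- see
# _generatefiles() above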
265 @active
269 @active
266 def find(self, file):
270 def find(self, file):
267 if file in self.map:
271 if file in self.map:
268 return self.entries[self.map[file]]
272 return self.entries[self.map[file]]
269 if file in self._backupmap:
273 if file in self._backupmap:
270 return self._backupentries[self._backupmap[file]]
274 return self._backupentries[self._backupmap[file]]
271 return None
275 return None
272
276
273 @active
277 @active
274 def replace(self, file, offset, data=None):
278 def replace(self, file, offset, data=None):
275 '''
279 '''
276 replace can only replace already committed entries
280 replace can only replace already committed entries
277 that are not pending in the queue
281 that are not pending in the queue
278 '''
282 '''
279
283
280 if file not in self.map:
284 if file not in self.map:
281 raise KeyError(file)
285 raise KeyError(file)
282 index = self.map[file]
286 index = self.map[file]
283 self.entries[index] = (file, offset, data)
287 self.entries[index] = (file, offset, data)
284 self.file.write("%s\0%d\n" % (file, offset))
288 self.file.write("%s\0%d\n" % (file, offset))
285 self.file.flush()
289 self.file.flush()
286
290
287 @active
291 @active
288 def nest(self):
292 def nest(self):
289 self.count += 1
293 self.count += 1
290 self.usages += 1
294 self.usages += 1
291 return self
295 return self
292
296
293 def release(self):
297 def release(self):
294 if self.count > 0:
298 if self.count > 0:
295 self.usages -= 1
299 self.usages -= 1
296 # if all transaction scopes are released without being closed, abort
300 # if all transaction scopes are released without being closed, abort
297 if self.count > 0 and self.usages == 0:
301 if self.count > 0 and self.usages == 0:
298 self._abort()
302 self._abort()
299
303
300 def running(self):
304 def running(self):
301 return self.count > 0
305 return self.count > 0
302
306
303 def addpending(self, category, callback):
307 def addpending(self, category, callback):
304 """add a callback to be called when the transaction is pending
308 """add a callback to be called when the transaction is pending
305
309
306 The transaction will be given as callback's first argument.
310 The transaction will be given as callback's first argument.
307
311
308 Category is a unique identifier to allow overwriting an old callback
312 Category is a unique identifier to allow overwriting an old callback
309 with a newer callback.
313 with a newer callback.
310 """
314 """
311 self._pendingcallback[category] = callback
315 self._pendingcallback[category] = callback
312
316
313 @active
317 @active
314 def writepending(self):
318 def writepending(self):
315 '''write pending files to their temporary versions
319 '''write pending files to their temporary versions
316
320
317 This is used to allow hooks to view a transaction before commit'''
321 This is used to allow hooks to view a transaction before commit'''
318 categories = sorted(self._pendingcallback)
322 categories = sorted(self._pendingcallback)
319 for cat in categories:
323 for cat in categories:
320 # remove callback since the data will have been flushed
324 # remove callback since the data will have been flushed
321 any = self._pendingcallback.pop(cat)(self)
325 any = self._pendingcallback.pop(cat)(self)
322 self._anypending = self._anypending or any
326 self._anypending = self._anypending or any
323 return self._anypending
327 return self._anypending
324
328
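# usage sketch (hypothetical category and callback):
#
#     def flushpending(tr):
#         # write a pending view of some file for hooks; return True
#         # if anything was written
#         return True
#
#     tr.addpending('mydata', flushpending)
#     tr.writepending()  # runs each callback once, then drops it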
325 @active
329 @active
326 def addfinalize(self, category, callback):
330 def addfinalize(self, category, callback):
327 """add a callback to be called when the transaction is closed
331 """add a callback to be called when the transaction is closed
328
332
329 The transaction will be given as callback's first argument.
333 The transaction will be given as callback's first argument.
330
334
331 Category is a unique identifier to allow overwriting old callbacks with
335 Category is a unique identifier to allow overwriting old callbacks with
332 newer callbacks.
336 newer callbacks.
333 """
337 """
334 self._finalizecallback[category] = callback
338 self._finalizecallback[category] = callback
335
339
336 @active
340 @active
337 def addpostclose(self, category, callback):
341 def addpostclose(self, category, callback):
338 """add a callback to be called after the transaction is closed
342 """add a callback to be called after the transaction is closed
339
343
340 The transaction will be given as callback's first argument.
344 The transaction will be given as callback's first argument.
341
345
342 Category is a unique identifier to allow overwriting an old callback
346 Category is a unique identifier to allow overwriting an old callback
343 with a newer callback.
347 with a newer callback.
344 """
348 """
345 self._postclosecallback[category] = callback
349 self._postclosecallback[category] = callback
346
350
347 @active
351 @active
348 def close(self):
352 def close(self):
349 '''commit the transaction'''
353 '''commit the transaction'''
350 if self.count == 1:
354 if self.count == 1:
351 self._generatefiles()
355 self._generatefiles()
352 categories = sorted(self._finalizecallback)
356 categories = sorted(self._finalizecallback)
353 for cat in categories:
357 for cat in categories:
354 self._finalizecallback[cat](self)
358 self._finalizecallback[cat](self)
355 if self.onclose is not None:
359 if self.onclose is not None:
356 self.onclose()
360 self.onclose()
357
361
358 self.count -= 1
362 self.count -= 1
359 if self.count != 0:
363 if self.count != 0:
360 return
364 return
361 self.file.close()
365 self.file.close()
362 self._backupsfile.close()
366 self._backupsfile.close()
363 # cleanup temporary files
367 # cleanup temporary files
364 for _l, f, b, _c in self._backupentries:
368 for l, f, b, _c in self._backupentries:
365 if not f and b and self.opener.exists(b):
369 vfs = self._vfsmap[l]
366 self.opener.unlink(b)
370 if not f and b and vfs.exists(b):
371 vfs.unlink(b)
367 self.entries = []
372 self.entries = []
368 if self.after:
373 if self.after:
369 self.after()
374 self.after()
370 if self.opener.isfile(self.journal):
375 if self.opener.isfile(self.journal):
371 self.opener.unlink(self.journal)
376 self.opener.unlink(self.journal)
372 if self.opener.isfile(self._backupjournal):
377 if self.opener.isfile(self._backupjournal):
373 self.opener.unlink(self._backupjournal)
378 self.opener.unlink(self._backupjournal)
374 for _l, _f, b, _c in self._backupentries:
379 for l, _f, b, _c in self._backupentries:
375 if b and self.opener.exists(b):
380 vfs = self._vfsmap[l]
376 self.opener.unlink(b)
381 if b and vfs.exists(b):
382 vfs.unlink(b)
377 self._backupentries = []
383 self._backupentries = []
378 self.journal = None
384 self.journal = None
379 # run post close action
385 # run post close action
380 categories = sorted(self._postclosecallback)
386 categories = sorted(self._postclosecallback)
381 for cat in categories:
387 for cat in categories:
382 self._postclosecallback[cat](self)
388 self._postclosecallback[cat](self)
383
389
384 @active
390 @active
385 def abort(self):
391 def abort(self):
386 '''abort the transaction (generally called on error, or when the
392 '''abort the transaction (generally called on error, or when the
387 transaction is not explicitly committed before going out of
393 transaction is not explicitly committed before going out of
388 scope)'''
394 scope)'''
389 self._abort()
395 self._abort()
390
396
391 def _abort(self):
397 def _abort(self):
392 self.count = 0
398 self.count = 0
393 self.usages = 0
399 self.usages = 0
394 self.file.close()
400 self.file.close()
395 self._backupsfile.close()
401 self._backupsfile.close()
396
402
397 if self.onabort is not None:
403 if self.onabort is not None:
398 self.onabort()
404 self.onabort()
399
405
400 try:
406 try:
401 if not self.entries and not self._backupentries:
407 if not self.entries and not self._backupentries:
402 if self.journal:
408 if self.journal:
403 self.opener.unlink(self.journal)
409 self.opener.unlink(self.journal)
404 if self._backupjournal:
410 if self._backupjournal:
405 self.opener.unlink(self._backupjournal)
411 self.opener.unlink(self._backupjournal)
406 return
412 return
407
413
408 self.report(_("transaction abort!\n"))
414 self.report(_("transaction abort!\n"))
409
415
410 try:
416 try:
411 _playback(self.journal, self.report, self.opener,
417 _playback(self.journal, self.report, self.opener, self._vfsmap,
412 self.entries, self._backupentries, False)
418 self.entries, self._backupentries, False)
413 self.report(_("rollback completed\n"))
419 self.report(_("rollback completed\n"))
414 except Exception:
420 except Exception:
415 self.report(_("rollback failed - please run hg recover\n"))
421 self.report(_("rollback failed - please run hg recover\n"))
416 finally:
422 finally:
417 self.journal = None
423 self.journal = None
418
424
419
425
420 def rollback(opener, file, report):
426 def rollback(opener, vfsmap, file, report):
421 """Rolls back the transaction contained in the given file
427 """Rolls back the transaction contained in the given file
422
428
423 Reads the entries in the specified file, and the corresponding
429 Reads the entries in the specified file, and the corresponding
424 '*.backupfiles' file, to recover from an incomplete transaction.
430 '*.backupfiles' file, to recover from an incomplete transaction.
425
431
426 * `file`: a file containing a list of entries, specifying where
432 * `file`: a file containing a list of entries, specifying where
427 to truncate each file. The file should contain a list of
433 to truncate each file. The file should contain a list of
428 file\0offset pairs, delimited by newlines. The corresponding
434 file\0offset pairs, delimited by newlines. The corresponding
429 '*.backupfiles' file should contain a list of
435 '*.backupfiles' file should contain a list of
430 location\0file\0backupfile\0cache entries, delimited by newlines.
436 location\0file\0backupfile\0cache entries, delimited by newlines.
431 """
437 """
432 entries = []
438 entries = []
433 backupentries = []
439 backupentries = []
434
440
435 fp = opener.open(file)
441 fp = opener.open(file)
436 lines = fp.readlines()
442 lines = fp.readlines()
437 fp.close()
443 fp.close()
438 for l in lines:
444 for l in lines:
439 try:
445 try:
440 f, o = l.split('\0')
446 f, o = l.split('\0')
441 entries.append((f, int(o), None))
447 entries.append((f, int(o), None))
442 except ValueError:
448 except ValueError:
443 report(_("couldn't read journal entry %r!\n") % l)
449 report(_("couldn't read journal entry %r!\n") % l)
444
450
445 backupjournal = "%s.backupfiles" % file
451 backupjournal = "%s.backupfiles" % file
446 if opener.exists(backupjournal):
452 if opener.exists(backupjournal):
447 fp = opener.open(backupjournal)
453 fp = opener.open(backupjournal)
448 lines = fp.readlines()
454 lines = fp.readlines()
449 if lines:
455 if lines:
450 ver = lines[0][:-1]
456 ver = lines[0][:-1]
451 if ver == str(version):
457 if ver == str(version):
452 for line in lines[1:]:
458 for line in lines[1:]:
453 if line:
459 if line:
454 # Shave off the trailing newline
460 # Shave off the trailing newline
455 line = line[:-1]
461 line = line[:-1]
456 l, f, b, c = line.split('\0')
462 l, f, b, c = line.split('\0')
457 backupentries.append((l, f, b, bool(c)))
463 backupentries.append((l, f, b, bool(c)))
458 else:
464 else:
459 report(_("journal was created by a different version of "
465 report(_("journal was created by a different version of "
460 "Mercurial"))
466 "Mercurial"))
461
467
462 _playback(file, report, opener, entries, backupentries)
468 _playback(file, report, opener, vfsmap, entries, backupentries)
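For reference, a minimal sketch of parsing the journal format described above, mirroring the loop in rollback() (file names and offsets are hypothetical):

def parsejournal(data):
    # parse "file\0offset" lines exactly as rollback() does
    entries = []
    for line in data.splitlines():
        f, o = line.split('\0')
        entries.append((f, int(o), None))
    return entries

# parsejournal('data/foo.i\x001234\n00changelog.i\x00567\n')
# -> [('data/foo.i', 1234, None), ('00changelog.i', 567, None)]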