transaction: pass a vfs map to the transaction...
Pierre-Yves David - r23310:5bd1f657 default
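This changeset touches two callers: the contrib undumprevlog script and
mercurial/localrepo.py. Until now a transaction journaled every file through
the single store opener it was given; after this change it also receives a
map from symbolic location names to vfs objects, so journal entries can name
files that live outside the store (for example directly under .hg/). Below is
a minimal sketch of the idea; MiniVfs and resolve are hypothetical stand-ins,
not Mercurial's real classes, and only the 'store' and 'plain' keys are taken
from the diff itself.

import os

class MiniVfs(object):
    """Stand-in for a Mercurial vfs: resolves file names under a base."""
    def __init__(self, base):
        self.base = base
    def join(self, name):
        return os.path.join(self.base, name)

# The transaction is handed a map of location names to vfs objects. A
# journal entry recorded as (location, filename) can then be resolved
# through the map instead of assuming every file lives in the store.
vfsmap = {
    'store': MiniVfs('.hg/store'),  # revlog data
    'plain': MiniVfs('.hg'),        # files at the root of .hg/
}

def resolve(location, name):
    # Hypothetical helper: map a journaled (location, name) pair back to
    # a concrete path.
    return vfsmap[location].join(name)

print(resolve('plain', 'journal.dirstate'))  # -> .hg/journal.dirstate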
@@ -1,37 +1,38 @@
 #!/usr/bin/env python
 # Undump a dump from dumprevlog
 # $ hg init
 # $ undumprevlog < repo.dump

 import sys
 from mercurial import revlog, node, scmutil, util, transaction

 for fp in (sys.stdin, sys.stdout, sys.stderr):
     util.setbinary(fp)

 opener = scmutil.opener('.', False)
-tr = transaction.transaction(sys.stderr.write, opener, "undump.journal")
+tr = transaction.transaction(sys.stderr.write, opener, {'store': opener},
+                             "undump.journal")
 while True:
     l = sys.stdin.readline()
     if not l:
         break
     if l.startswith("file:"):
         f = l[6:-1]
         r = revlog.revlog(opener, f)
         print f
     elif l.startswith("node:"):
         n = node.bin(l[6:-1])
     elif l.startswith("linkrev:"):
         lr = int(l[9:-1])
     elif l.startswith("parents:"):
         p = l[9:-1].split()
         p1 = node.bin(p[0])
         p2 = node.bin(p[1])
     elif l.startswith("length:"):
         length = int(l[8:-1])
         sys.stdin.readline() # start marker
         d = sys.stdin.read(length)
         sys.stdin.readline() # end marker
         r.addrevision(d, tr, lr, p1, p2)

 tr.close()
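In the script above, the single scmutil.opener rooted at the current
directory plays the role of the store, so a one-entry map ({'store': opener})
is all it needs. The payoff of carrying a map shows up at undo time: an entry
can record which location it was written through and still be found again. A
rough sketch of that rollback step, assuming (hypothetically, and much
simplified) that entries are (location, filename, offset) triples and that
locations map to plain base directories:

import os

def rollback(bases, entries):
    # bases: location name -> base directory (stands in for the vfs map).
    # entries: (location, filename, offset) triples recorded while the
    # transaction ran; truncate each file back to its old size.
    for location, name, offset in entries:
        path = os.path.join(bases[location], name)
        with open(path, 'r+b') as fobj:
            fobj.truncate(offset)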
@@ -1,1806 +1,1807 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 propertycache = util.propertycache
21 propertycache = util.propertycache
22 filecache = scmutil.filecache
22 filecache = scmutil.filecache
23
23
24 class repofilecache(filecache):
24 class repofilecache(filecache):
25 """All filecache usage on repo are done for logic that should be unfiltered
25 """All filecache usage on repo are done for logic that should be unfiltered
26 """
26 """
27
27
28 def __get__(self, repo, type=None):
28 def __get__(self, repo, type=None):
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 def __set__(self, repo, value):
30 def __set__(self, repo, value):
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 def __delete__(self, repo):
32 def __delete__(self, repo):
33 return super(repofilecache, self).__delete__(repo.unfiltered())
33 return super(repofilecache, self).__delete__(repo.unfiltered())
34
34
35 class storecache(repofilecache):
35 class storecache(repofilecache):
36 """filecache for files in the store"""
36 """filecache for files in the store"""
37 def join(self, obj, fname):
37 def join(self, obj, fname):
38 return obj.sjoin(fname)
38 return obj.sjoin(fname)
39
39
40 class unfilteredpropertycache(propertycache):
40 class unfilteredpropertycache(propertycache):
41 """propertycache that apply to unfiltered repo only"""
41 """propertycache that apply to unfiltered repo only"""
42
42
43 def __get__(self, repo, type=None):
43 def __get__(self, repo, type=None):
44 unfi = repo.unfiltered()
44 unfi = repo.unfiltered()
45 if unfi is repo:
45 if unfi is repo:
46 return super(unfilteredpropertycache, self).__get__(unfi)
46 return super(unfilteredpropertycache, self).__get__(unfi)
47 return getattr(unfi, self.name)
47 return getattr(unfi, self.name)
48
48
49 class filteredpropertycache(propertycache):
49 class filteredpropertycache(propertycache):
50 """propertycache that must take filtering in account"""
50 """propertycache that must take filtering in account"""
51
51
52 def cachevalue(self, obj, value):
52 def cachevalue(self, obj, value):
53 object.__setattr__(obj, self.name, value)
53 object.__setattr__(obj, self.name, value)
54
54
55
55
56 def hasunfilteredcache(repo, name):
56 def hasunfilteredcache(repo, name):
57 """check if a repo has an unfilteredpropertycache value for <name>"""
57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 return name in vars(repo.unfiltered())
58 return name in vars(repo.unfiltered())
59
59
60 def unfilteredmethod(orig):
60 def unfilteredmethod(orig):
61 """decorate method that always need to be run on unfiltered version"""
61 """decorate method that always need to be run on unfiltered version"""
62 def wrapper(repo, *args, **kwargs):
62 def wrapper(repo, *args, **kwargs):
63 return orig(repo.unfiltered(), *args, **kwargs)
63 return orig(repo.unfiltered(), *args, **kwargs)
64 return wrapper
64 return wrapper
65
65
66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 'unbundle'))
67 'unbundle'))
68 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69
69
70 class localpeer(peer.peerrepository):
70 class localpeer(peer.peerrepository):
71 '''peer for a local repo; reflects only the most recent API'''
71 '''peer for a local repo; reflects only the most recent API'''
72
72
73 def __init__(self, repo, caps=moderncaps):
73 def __init__(self, repo, caps=moderncaps):
74 peer.peerrepository.__init__(self)
74 peer.peerrepository.__init__(self)
75 self._repo = repo.filtered('served')
75 self._repo = repo.filtered('served')
76 self.ui = repo.ui
76 self.ui = repo.ui
77 self._caps = repo._restrictcapabilities(caps)
77 self._caps = repo._restrictcapabilities(caps)
78 self.requirements = repo.requirements
78 self.requirements = repo.requirements
79 self.supportedformats = repo.supportedformats
79 self.supportedformats = repo.supportedformats
80
80
81 def close(self):
81 def close(self):
82 self._repo.close()
82 self._repo.close()
83
83
84 def _capabilities(self):
84 def _capabilities(self):
85 return self._caps
85 return self._caps
86
86
87 def local(self):
87 def local(self):
88 return self._repo
88 return self._repo
89
89
90 def canpush(self):
90 def canpush(self):
91 return True
91 return True
92
92
93 def url(self):
93 def url(self):
94 return self._repo.url()
94 return self._repo.url()
95
95
96 def lookup(self, key):
96 def lookup(self, key):
97 return self._repo.lookup(key)
97 return self._repo.lookup(key)
98
98
99 def branchmap(self):
99 def branchmap(self):
100 return self._repo.branchmap()
100 return self._repo.branchmap()
101
101
102 def heads(self):
102 def heads(self):
103 return self._repo.heads()
103 return self._repo.heads()
104
104
105 def known(self, nodes):
105 def known(self, nodes):
106 return self._repo.known(nodes)
106 return self._repo.known(nodes)
107
107
108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 format='HG10', **kwargs):
109 format='HG10', **kwargs):
110 cg = exchange.getbundle(self._repo, source, heads=heads,
110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 common=common, bundlecaps=bundlecaps, **kwargs)
111 common=common, bundlecaps=bundlecaps, **kwargs)
112 if bundlecaps is not None and 'HG2Y' in bundlecaps:
112 if bundlecaps is not None and 'HG2Y' in bundlecaps:
113 # When requesting a bundle2, getbundle returns a stream to make the
113 # When requesting a bundle2, getbundle returns a stream to make the
114 # wire level function happier. We need to build a proper object
114 # wire level function happier. We need to build a proper object
115 # from it in local peer.
115 # from it in local peer.
116 cg = bundle2.unbundle20(self.ui, cg)
116 cg = bundle2.unbundle20(self.ui, cg)
117 return cg
117 return cg
118
118
119 # TODO We might want to move the next two calls into legacypeer and add
119 # TODO We might want to move the next two calls into legacypeer and add
120 # unbundle instead.
120 # unbundle instead.
121
121
122 def unbundle(self, cg, heads, url):
122 def unbundle(self, cg, heads, url):
123 """apply a bundle on a repo
123 """apply a bundle on a repo
124
124
125 This function handles the repo locking itself."""
125 This function handles the repo locking itself."""
126 try:
126 try:
127 cg = exchange.readbundle(self.ui, cg, None)
127 cg = exchange.readbundle(self.ui, cg, None)
128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 if util.safehasattr(ret, 'getchunks'):
129 if util.safehasattr(ret, 'getchunks'):
130 # This is a bundle20 object, turn it into an unbundler.
130 # This is a bundle20 object, turn it into an unbundler.
131 # This little dance should be dropped eventually when the API
131 # This little dance should be dropped eventually when the API
132 # is finally improved.
132 # is finally improved.
133 stream = util.chunkbuffer(ret.getchunks())
133 stream = util.chunkbuffer(ret.getchunks())
134 ret = bundle2.unbundle20(self.ui, stream)
134 ret = bundle2.unbundle20(self.ui, stream)
135 return ret
135 return ret
136 except error.PushRaced, exc:
136 except error.PushRaced, exc:
137 raise error.ResponseError(_('push failed:'), str(exc))
137 raise error.ResponseError(_('push failed:'), str(exc))
138
138
139 def lock(self):
139 def lock(self):
140 return self._repo.lock()
140 return self._repo.lock()
141
141
142 def addchangegroup(self, cg, source, url):
142 def addchangegroup(self, cg, source, url):
143 return changegroup.addchangegroup(self._repo, cg, source, url)
143 return changegroup.addchangegroup(self._repo, cg, source, url)
144
144
145 def pushkey(self, namespace, key, old, new):
145 def pushkey(self, namespace, key, old, new):
146 return self._repo.pushkey(namespace, key, old, new)
146 return self._repo.pushkey(namespace, key, old, new)
147
147
148 def listkeys(self, namespace):
148 def listkeys(self, namespace):
149 return self._repo.listkeys(namespace)
149 return self._repo.listkeys(namespace)
150
150
151 def debugwireargs(self, one, two, three=None, four=None, five=None):
151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 '''used to test argument passing over the wire'''
152 '''used to test argument passing over the wire'''
153 return "%s %s %s %s %s" % (one, two, three, four, five)
153 return "%s %s %s %s %s" % (one, two, three, four, five)
154
154
155 class locallegacypeer(localpeer):
155 class locallegacypeer(localpeer):
156 '''peer extension which implements legacy methods too; used for tests with
156 '''peer extension which implements legacy methods too; used for tests with
157 restricted capabilities'''
157 restricted capabilities'''
158
158
159 def __init__(self, repo):
159 def __init__(self, repo):
160 localpeer.__init__(self, repo, caps=legacycaps)
160 localpeer.__init__(self, repo, caps=legacycaps)
161
161
162 def branches(self, nodes):
162 def branches(self, nodes):
163 return self._repo.branches(nodes)
163 return self._repo.branches(nodes)
164
164
165 def between(self, pairs):
165 def between(self, pairs):
166 return self._repo.between(pairs)
166 return self._repo.between(pairs)
167
167
168 def changegroup(self, basenodes, source):
168 def changegroup(self, basenodes, source):
169 return changegroup.changegroup(self._repo, basenodes, source)
169 return changegroup.changegroup(self._repo, basenodes, source)
170
170
171 def changegroupsubset(self, bases, heads, source):
171 def changegroupsubset(self, bases, heads, source):
172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173
173
174 class localrepository(object):
174 class localrepository(object):
175
175
176 supportedformats = set(('revlogv1', 'generaldelta'))
176 supportedformats = set(('revlogv1', 'generaldelta'))
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 'dotencode'))
178 'dotencode'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
180 requirements = ['revlogv1']
180 requirements = ['revlogv1']
181 filtername = None
181 filtername = None
182
182
183 # a list of (ui, featureset) functions.
183 # a list of (ui, featureset) functions.
184 # only functions defined in module of enabled extensions are invoked
184 # only functions defined in module of enabled extensions are invoked
185 featuresetupfuncs = set()
185 featuresetupfuncs = set()
186
186
187 def _baserequirements(self, create):
187 def _baserequirements(self, create):
188 return self.requirements[:]
188 return self.requirements[:]
189
189
190 def __init__(self, baseui, path=None, create=False):
190 def __init__(self, baseui, path=None, create=False):
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wopener = self.wvfs
192 self.wopener = self.wvfs
193 self.root = self.wvfs.base
193 self.root = self.wvfs.base
194 self.path = self.wvfs.join(".hg")
194 self.path = self.wvfs.join(".hg")
195 self.origroot = path
195 self.origroot = path
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.vfs = scmutil.vfs(self.path)
197 self.vfs = scmutil.vfs(self.path)
198 self.opener = self.vfs
198 self.opener = self.vfs
199 self.baseui = baseui
199 self.baseui = baseui
200 self.ui = baseui.copy()
200 self.ui = baseui.copy()
201 self.ui.copy = baseui.copy # prevent copying repo configuration
201 self.ui.copy = baseui.copy # prevent copying repo configuration
202 # A list of callback to shape the phase if no data were found.
202 # A list of callback to shape the phase if no data were found.
203 # Callback are in the form: func(repo, roots) --> processed root.
203 # Callback are in the form: func(repo, roots) --> processed root.
204 # This list it to be filled by extension during repo setup
204 # This list it to be filled by extension during repo setup
205 self._phasedefaults = []
205 self._phasedefaults = []
206 try:
206 try:
207 self.ui.readconfig(self.join("hgrc"), self.root)
207 self.ui.readconfig(self.join("hgrc"), self.root)
208 extensions.loadall(self.ui)
208 extensions.loadall(self.ui)
209 except IOError:
209 except IOError:
210 pass
210 pass
211
211
212 if self.featuresetupfuncs:
212 if self.featuresetupfuncs:
213 self.supported = set(self._basesupported) # use private copy
213 self.supported = set(self._basesupported) # use private copy
214 extmods = set(m.__name__ for n, m
214 extmods = set(m.__name__ for n, m
215 in extensions.extensions(self.ui))
215 in extensions.extensions(self.ui))
216 for setupfunc in self.featuresetupfuncs:
216 for setupfunc in self.featuresetupfuncs:
217 if setupfunc.__module__ in extmods:
217 if setupfunc.__module__ in extmods:
218 setupfunc(self.ui, self.supported)
218 setupfunc(self.ui, self.supported)
219 else:
219 else:
220 self.supported = self._basesupported
220 self.supported = self._basesupported
221
221
222 if not self.vfs.isdir():
222 if not self.vfs.isdir():
223 if create:
223 if create:
224 if not self.wvfs.exists():
224 if not self.wvfs.exists():
225 self.wvfs.makedirs()
225 self.wvfs.makedirs()
226 self.vfs.makedir(notindexed=True)
226 self.vfs.makedir(notindexed=True)
227 requirements = self._baserequirements(create)
227 requirements = self._baserequirements(create)
228 if self.ui.configbool('format', 'usestore', True):
228 if self.ui.configbool('format', 'usestore', True):
229 self.vfs.mkdir("store")
229 self.vfs.mkdir("store")
230 requirements.append("store")
230 requirements.append("store")
231 if self.ui.configbool('format', 'usefncache', True):
231 if self.ui.configbool('format', 'usefncache', True):
232 requirements.append("fncache")
232 requirements.append("fncache")
233 if self.ui.configbool('format', 'dotencode', True):
233 if self.ui.configbool('format', 'dotencode', True):
234 requirements.append('dotencode')
234 requirements.append('dotencode')
235 # create an invalid changelog
235 # create an invalid changelog
236 self.vfs.append(
236 self.vfs.append(
237 "00changelog.i",
237 "00changelog.i",
238 '\0\0\0\2' # represents revlogv2
238 '\0\0\0\2' # represents revlogv2
239 ' dummy changelog to prevent using the old repo layout'
239 ' dummy changelog to prevent using the old repo layout'
240 )
240 )
241 if self.ui.configbool('format', 'generaldelta', False):
241 if self.ui.configbool('format', 'generaldelta', False):
242 requirements.append("generaldelta")
242 requirements.append("generaldelta")
243 requirements = set(requirements)
243 requirements = set(requirements)
244 else:
244 else:
245 raise error.RepoError(_("repository %s not found") % path)
245 raise error.RepoError(_("repository %s not found") % path)
246 elif create:
246 elif create:
247 raise error.RepoError(_("repository %s already exists") % path)
247 raise error.RepoError(_("repository %s already exists") % path)
248 else:
248 else:
249 try:
249 try:
250 requirements = scmutil.readrequires(self.vfs, self.supported)
250 requirements = scmutil.readrequires(self.vfs, self.supported)
251 except IOError, inst:
251 except IOError, inst:
252 if inst.errno != errno.ENOENT:
252 if inst.errno != errno.ENOENT:
253 raise
253 raise
254 requirements = set()
254 requirements = set()
255
255
256 self.sharedpath = self.path
256 self.sharedpath = self.path
257 try:
257 try:
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 realpath=True)
259 realpath=True)
260 s = vfs.base
260 s = vfs.base
261 if not vfs.exists():
261 if not vfs.exists():
262 raise error.RepoError(
262 raise error.RepoError(
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 self.sharedpath = s
264 self.sharedpath = s
265 except IOError, inst:
265 except IOError, inst:
266 if inst.errno != errno.ENOENT:
266 if inst.errno != errno.ENOENT:
267 raise
267 raise
268
268
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 self.spath = self.store.path
270 self.spath = self.store.path
271 self.svfs = self.store.vfs
271 self.svfs = self.store.vfs
272 self.sopener = self.svfs
272 self.sopener = self.svfs
273 self.sjoin = self.store.join
273 self.sjoin = self.store.join
274 self.vfs.createmode = self.store.createmode
274 self.vfs.createmode = self.store.createmode
275 self._applyrequirements(requirements)
275 self._applyrequirements(requirements)
276 if create:
276 if create:
277 self._writerequirements()
277 self._writerequirements()
278
278
279
279
280 self._branchcaches = {}
280 self._branchcaches = {}
281 self.filterpats = {}
281 self.filterpats = {}
282 self._datafilters = {}
282 self._datafilters = {}
283 self._transref = self._lockref = self._wlockref = None
283 self._transref = self._lockref = self._wlockref = None
284
284
285 # A cache for various files under .hg/ that tracks file changes,
285 # A cache for various files under .hg/ that tracks file changes,
286 # (used by the filecache decorator)
286 # (used by the filecache decorator)
287 #
287 #
288 # Maps a property name to its util.filecacheentry
288 # Maps a property name to its util.filecacheentry
289 self._filecache = {}
289 self._filecache = {}
290
290
291 # hold sets of revision to be filtered
291 # hold sets of revision to be filtered
292 # should be cleared when something might have changed the filter value:
292 # should be cleared when something might have changed the filter value:
293 # - new changesets,
293 # - new changesets,
294 # - phase change,
294 # - phase change,
295 # - new obsolescence marker,
295 # - new obsolescence marker,
296 # - working directory parent change,
296 # - working directory parent change,
297 # - bookmark changes
297 # - bookmark changes
298 self.filteredrevcache = {}
298 self.filteredrevcache = {}
299
299
300 def close(self):
300 def close(self):
301 pass
301 pass
302
302
303 def _restrictcapabilities(self, caps):
303 def _restrictcapabilities(self, caps):
304 # bundle2 is not ready for prime time, drop it unless explicitly
304 # bundle2 is not ready for prime time, drop it unless explicitly
305 # required by the tests (or some brave tester)
305 # required by the tests (or some brave tester)
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 caps = set(caps)
307 caps = set(caps)
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 return caps
310 return caps
311
311
312 def _applyrequirements(self, requirements):
312 def _applyrequirements(self, requirements):
313 self.requirements = requirements
313 self.requirements = requirements
314 self.sopener.options = dict((r, 1) for r in requirements
314 self.sopener.options = dict((r, 1) for r in requirements
315 if r in self.openerreqs)
315 if r in self.openerreqs)
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 if chunkcachesize is not None:
317 if chunkcachesize is not None:
318 self.sopener.options['chunkcachesize'] = chunkcachesize
318 self.sopener.options['chunkcachesize'] = chunkcachesize
319 maxchainlen = self.ui.configint('format', 'maxchainlen')
319 maxchainlen = self.ui.configint('format', 'maxchainlen')
320 if maxchainlen is not None:
320 if maxchainlen is not None:
321 self.sopener.options['maxchainlen'] = maxchainlen
321 self.sopener.options['maxchainlen'] = maxchainlen
322
322
323 def _writerequirements(self):
323 def _writerequirements(self):
324 reqfile = self.opener("requires", "w")
324 reqfile = self.opener("requires", "w")
325 for r in sorted(self.requirements):
325 for r in sorted(self.requirements):
326 reqfile.write("%s\n" % r)
326 reqfile.write("%s\n" % r)
327 reqfile.close()
327 reqfile.close()
328
328
329 def _checknested(self, path):
329 def _checknested(self, path):
330 """Determine if path is a legal nested repository."""
330 """Determine if path is a legal nested repository."""
331 if not path.startswith(self.root):
331 if not path.startswith(self.root):
332 return False
332 return False
333 subpath = path[len(self.root) + 1:]
333 subpath = path[len(self.root) + 1:]
334 normsubpath = util.pconvert(subpath)
334 normsubpath = util.pconvert(subpath)
335
335
336 # XXX: Checking against the current working copy is wrong in
336 # XXX: Checking against the current working copy is wrong in
337 # the sense that it can reject things like
337 # the sense that it can reject things like
338 #
338 #
339 # $ hg cat -r 10 sub/x.txt
339 # $ hg cat -r 10 sub/x.txt
340 #
340 #
341 # if sub/ is no longer a subrepository in the working copy
341 # if sub/ is no longer a subrepository in the working copy
342 # parent revision.
342 # parent revision.
343 #
343 #
344 # However, it can of course also allow things that would have
344 # However, it can of course also allow things that would have
345 # been rejected before, such as the above cat command if sub/
345 # been rejected before, such as the above cat command if sub/
346 # is a subrepository now, but was a normal directory before.
346 # is a subrepository now, but was a normal directory before.
347 # The old path auditor would have rejected by mistake since it
347 # The old path auditor would have rejected by mistake since it
348 # panics when it sees sub/.hg/.
348 # panics when it sees sub/.hg/.
349 #
349 #
350 # All in all, checking against the working copy seems sensible
350 # All in all, checking against the working copy seems sensible
351 # since we want to prevent access to nested repositories on
351 # since we want to prevent access to nested repositories on
352 # the filesystem *now*.
352 # the filesystem *now*.
353 ctx = self[None]
353 ctx = self[None]
354 parts = util.splitpath(subpath)
354 parts = util.splitpath(subpath)
355 while parts:
355 while parts:
356 prefix = '/'.join(parts)
356 prefix = '/'.join(parts)
357 if prefix in ctx.substate:
357 if prefix in ctx.substate:
358 if prefix == normsubpath:
358 if prefix == normsubpath:
359 return True
359 return True
360 else:
360 else:
361 sub = ctx.sub(prefix)
361 sub = ctx.sub(prefix)
362 return sub.checknested(subpath[len(prefix) + 1:])
362 return sub.checknested(subpath[len(prefix) + 1:])
363 else:
363 else:
364 parts.pop()
364 parts.pop()
365 return False
365 return False
366
366
367 def peer(self):
367 def peer(self):
368 return localpeer(self) # not cached to avoid reference cycle
368 return localpeer(self) # not cached to avoid reference cycle
369
369
370 def unfiltered(self):
370 def unfiltered(self):
371 """Return unfiltered version of the repository
371 """Return unfiltered version of the repository
372
372
373 Intended to be overwritten by filtered repo."""
373 Intended to be overwritten by filtered repo."""
374 return self
374 return self
375
375
376 def filtered(self, name):
376 def filtered(self, name):
377 """Return a filtered version of a repository"""
377 """Return a filtered version of a repository"""
378 # build a new class with the mixin and the current class
378 # build a new class with the mixin and the current class
379 # (possibly subclass of the repo)
379 # (possibly subclass of the repo)
380 class proxycls(repoview.repoview, self.unfiltered().__class__):
380 class proxycls(repoview.repoview, self.unfiltered().__class__):
381 pass
381 pass
382 return proxycls(self, name)
382 return proxycls(self, name)
383
383
384 @repofilecache('bookmarks')
384 @repofilecache('bookmarks')
385 def _bookmarks(self):
385 def _bookmarks(self):
386 return bookmarks.bmstore(self)
386 return bookmarks.bmstore(self)
387
387
388 @repofilecache('bookmarks.current')
388 @repofilecache('bookmarks.current')
389 def _bookmarkcurrent(self):
389 def _bookmarkcurrent(self):
390 return bookmarks.readcurrent(self)
390 return bookmarks.readcurrent(self)
391
391
392 def bookmarkheads(self, bookmark):
392 def bookmarkheads(self, bookmark):
393 name = bookmark.split('@', 1)[0]
393 name = bookmark.split('@', 1)[0]
394 heads = []
394 heads = []
395 for mark, n in self._bookmarks.iteritems():
395 for mark, n in self._bookmarks.iteritems():
396 if mark.split('@', 1)[0] == name:
396 if mark.split('@', 1)[0] == name:
397 heads.append(n)
397 heads.append(n)
398 return heads
398 return heads
399
399
400 @storecache('phaseroots')
400 @storecache('phaseroots')
401 def _phasecache(self):
401 def _phasecache(self):
402 return phases.phasecache(self, self._phasedefaults)
402 return phases.phasecache(self, self._phasedefaults)
403
403
404 @storecache('obsstore')
404 @storecache('obsstore')
405 def obsstore(self):
405 def obsstore(self):
406 # read default format for new obsstore.
406 # read default format for new obsstore.
407 defaultformat = self.ui.configint('format', 'obsstore-version', None)
407 defaultformat = self.ui.configint('format', 'obsstore-version', None)
408 # rely on obsstore class default when possible.
408 # rely on obsstore class default when possible.
409 kwargs = {}
409 kwargs = {}
410 if defaultformat is not None:
410 if defaultformat is not None:
411 kwargs['defaultformat'] = defaultformat
411 kwargs['defaultformat'] = defaultformat
412 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
412 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
413 store = obsolete.obsstore(self.sopener, readonly=readonly,
413 store = obsolete.obsstore(self.sopener, readonly=readonly,
414 **kwargs)
414 **kwargs)
415 if store and readonly:
415 if store and readonly:
416 # message is rare enough to not be translated
416 # message is rare enough to not be translated
417 msg = 'obsolete feature not enabled but %i markers found!\n'
417 msg = 'obsolete feature not enabled but %i markers found!\n'
418 self.ui.warn(msg % len(list(store)))
418 self.ui.warn(msg % len(list(store)))
419 return store
419 return store
420
420
421 @storecache('00changelog.i')
421 @storecache('00changelog.i')
422 def changelog(self):
422 def changelog(self):
423 c = changelog.changelog(self.sopener)
423 c = changelog.changelog(self.sopener)
424 if 'HG_PENDING' in os.environ:
424 if 'HG_PENDING' in os.environ:
425 p = os.environ['HG_PENDING']
425 p = os.environ['HG_PENDING']
426 if p.startswith(self.root):
426 if p.startswith(self.root):
427 c.readpending('00changelog.i.a')
427 c.readpending('00changelog.i.a')
428 return c
428 return c
429
429
430 @storecache('00manifest.i')
430 @storecache('00manifest.i')
431 def manifest(self):
431 def manifest(self):
432 return manifest.manifest(self.sopener)
432 return manifest.manifest(self.sopener)
433
433
434 @repofilecache('dirstate')
434 @repofilecache('dirstate')
435 def dirstate(self):
435 def dirstate(self):
436 warned = [0]
436 warned = [0]
437 def validate(node):
437 def validate(node):
438 try:
438 try:
439 self.changelog.rev(node)
439 self.changelog.rev(node)
440 return node
440 return node
441 except error.LookupError:
441 except error.LookupError:
442 if not warned[0]:
442 if not warned[0]:
443 warned[0] = True
443 warned[0] = True
444 self.ui.warn(_("warning: ignoring unknown"
444 self.ui.warn(_("warning: ignoring unknown"
445 " working parent %s!\n") % short(node))
445 " working parent %s!\n") % short(node))
446 return nullid
446 return nullid
447
447
448 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
448 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
449
449
450 def __getitem__(self, changeid):
450 def __getitem__(self, changeid):
451 if changeid is None:
451 if changeid is None:
452 return context.workingctx(self)
452 return context.workingctx(self)
453 return context.changectx(self, changeid)
453 return context.changectx(self, changeid)
454
454
455 def __contains__(self, changeid):
455 def __contains__(self, changeid):
456 try:
456 try:
457 return bool(self.lookup(changeid))
457 return bool(self.lookup(changeid))
458 except error.RepoLookupError:
458 except error.RepoLookupError:
459 return False
459 return False
460
460
461 def __nonzero__(self):
461 def __nonzero__(self):
462 return True
462 return True
463
463
464 def __len__(self):
464 def __len__(self):
465 return len(self.changelog)
465 return len(self.changelog)
466
466
467 def __iter__(self):
467 def __iter__(self):
468 return iter(self.changelog)
468 return iter(self.changelog)
469
469
470 def revs(self, expr, *args):
470 def revs(self, expr, *args):
471 '''Return a list of revisions matching the given revset'''
471 '''Return a list of revisions matching the given revset'''
472 expr = revset.formatspec(expr, *args)
472 expr = revset.formatspec(expr, *args)
473 m = revset.match(None, expr)
473 m = revset.match(None, expr)
474 return m(self, revset.spanset(self))
474 return m(self, revset.spanset(self))
475
475
476 def set(self, expr, *args):
476 def set(self, expr, *args):
477 '''
477 '''
478 Yield a context for each matching revision, after doing arg
478 Yield a context for each matching revision, after doing arg
479 replacement via revset.formatspec
479 replacement via revset.formatspec
480 '''
480 '''
481 for r in self.revs(expr, *args):
481 for r in self.revs(expr, *args):
482 yield self[r]
482 yield self[r]
483
483
484 def url(self):
484 def url(self):
485 return 'file:' + self.root
485 return 'file:' + self.root
486
486
487 def hook(self, name, throw=False, **args):
487 def hook(self, name, throw=False, **args):
488 """Call a hook, passing this repo instance.
488 """Call a hook, passing this repo instance.
489
489
490 This a convenience method to aid invoking hooks. Extensions likely
490 This a convenience method to aid invoking hooks. Extensions likely
491 won't call this unless they have registered a custom hook or are
491 won't call this unless they have registered a custom hook or are
492 replacing code that is expected to call a hook.
492 replacing code that is expected to call a hook.
493 """
493 """
494 return hook.hook(self.ui, self, name, throw, **args)
494 return hook.hook(self.ui, self, name, throw, **args)
495
495
496 @unfilteredmethod
496 @unfilteredmethod
497 def _tag(self, names, node, message, local, user, date, extra={},
497 def _tag(self, names, node, message, local, user, date, extra={},
498 editor=False):
498 editor=False):
499 if isinstance(names, str):
499 if isinstance(names, str):
500 names = (names,)
500 names = (names,)
501
501
502 branches = self.branchmap()
502 branches = self.branchmap()
503 for name in names:
503 for name in names:
504 self.hook('pretag', throw=True, node=hex(node), tag=name,
504 self.hook('pretag', throw=True, node=hex(node), tag=name,
505 local=local)
505 local=local)
506 if name in branches:
506 if name in branches:
507 self.ui.warn(_("warning: tag %s conflicts with existing"
507 self.ui.warn(_("warning: tag %s conflicts with existing"
508 " branch name\n") % name)
508 " branch name\n") % name)
509
509
510 def writetags(fp, names, munge, prevtags):
510 def writetags(fp, names, munge, prevtags):
511 fp.seek(0, 2)
511 fp.seek(0, 2)
512 if prevtags and prevtags[-1] != '\n':
512 if prevtags and prevtags[-1] != '\n':
513 fp.write('\n')
513 fp.write('\n')
514 for name in names:
514 for name in names:
515 m = munge and munge(name) or name
515 m = munge and munge(name) or name
516 if (self._tagscache.tagtypes and
516 if (self._tagscache.tagtypes and
517 name in self._tagscache.tagtypes):
517 name in self._tagscache.tagtypes):
518 old = self.tags().get(name, nullid)
518 old = self.tags().get(name, nullid)
519 fp.write('%s %s\n' % (hex(old), m))
519 fp.write('%s %s\n' % (hex(old), m))
520 fp.write('%s %s\n' % (hex(node), m))
520 fp.write('%s %s\n' % (hex(node), m))
521 fp.close()
521 fp.close()
522
522
523 prevtags = ''
523 prevtags = ''
524 if local:
524 if local:
525 try:
525 try:
526 fp = self.opener('localtags', 'r+')
526 fp = self.opener('localtags', 'r+')
527 except IOError:
527 except IOError:
528 fp = self.opener('localtags', 'a')
528 fp = self.opener('localtags', 'a')
529 else:
529 else:
530 prevtags = fp.read()
530 prevtags = fp.read()
531
531
532 # local tags are stored in the current charset
532 # local tags are stored in the current charset
533 writetags(fp, names, None, prevtags)
533 writetags(fp, names, None, prevtags)
534 for name in names:
534 for name in names:
535 self.hook('tag', node=hex(node), tag=name, local=local)
535 self.hook('tag', node=hex(node), tag=name, local=local)
536 return
536 return
537
537
538 try:
538 try:
539 fp = self.wfile('.hgtags', 'rb+')
539 fp = self.wfile('.hgtags', 'rb+')
540 except IOError, e:
540 except IOError, e:
541 if e.errno != errno.ENOENT:
541 if e.errno != errno.ENOENT:
542 raise
542 raise
543 fp = self.wfile('.hgtags', 'ab')
543 fp = self.wfile('.hgtags', 'ab')
544 else:
544 else:
545 prevtags = fp.read()
545 prevtags = fp.read()
546
546
547 # committed tags are stored in UTF-8
547 # committed tags are stored in UTF-8
548 writetags(fp, names, encoding.fromlocal, prevtags)
548 writetags(fp, names, encoding.fromlocal, prevtags)
549
549
550 fp.close()
550 fp.close()
551
551
552 self.invalidatecaches()
552 self.invalidatecaches()
553
553
554 if '.hgtags' not in self.dirstate:
554 if '.hgtags' not in self.dirstate:
555 self[None].add(['.hgtags'])
555 self[None].add(['.hgtags'])
556
556
557 m = matchmod.exact(self.root, '', ['.hgtags'])
557 m = matchmod.exact(self.root, '', ['.hgtags'])
558 tagnode = self.commit(message, user, date, extra=extra, match=m,
558 tagnode = self.commit(message, user, date, extra=extra, match=m,
559 editor=editor)
559 editor=editor)
560
560
561 for name in names:
561 for name in names:
562 self.hook('tag', node=hex(node), tag=name, local=local)
562 self.hook('tag', node=hex(node), tag=name, local=local)
563
563
564 return tagnode
564 return tagnode
565
565
566 def tag(self, names, node, message, local, user, date, editor=False):
566 def tag(self, names, node, message, local, user, date, editor=False):
567 '''tag a revision with one or more symbolic names.
567 '''tag a revision with one or more symbolic names.
568
568
569 names is a list of strings or, when adding a single tag, names may be a
569 names is a list of strings or, when adding a single tag, names may be a
570 string.
570 string.
571
571
572 if local is True, the tags are stored in a per-repository file.
572 if local is True, the tags are stored in a per-repository file.
573 otherwise, they are stored in the .hgtags file, and a new
573 otherwise, they are stored in the .hgtags file, and a new
574 changeset is committed with the change.
574 changeset is committed with the change.
575
575
576 keyword arguments:
576 keyword arguments:
577
577
578 local: whether to store tags in non-version-controlled file
578 local: whether to store tags in non-version-controlled file
579 (default False)
579 (default False)
580
580
581 message: commit message to use if committing
581 message: commit message to use if committing
582
582
583 user: name of user to use if committing
583 user: name of user to use if committing
584
584
585 date: date tuple to use if committing'''
585 date: date tuple to use if committing'''
586
586
587 if not local:
587 if not local:
588 m = matchmod.exact(self.root, '', ['.hgtags'])
588 m = matchmod.exact(self.root, '', ['.hgtags'])
589 if util.any(self.status(match=m, unknown=True, ignored=True)):
589 if util.any(self.status(match=m, unknown=True, ignored=True)):
590 raise util.Abort(_('working copy of .hgtags is changed'),
590 raise util.Abort(_('working copy of .hgtags is changed'),
591 hint=_('please commit .hgtags manually'))
591 hint=_('please commit .hgtags manually'))
592
592
593 self.tags() # instantiate the cache
593 self.tags() # instantiate the cache
594 self._tag(names, node, message, local, user, date, editor=editor)
594 self._tag(names, node, message, local, user, date, editor=editor)
595
595
596 @filteredpropertycache
596 @filteredpropertycache
597 def _tagscache(self):
597 def _tagscache(self):
598 '''Returns a tagscache object that contains various tags related
598 '''Returns a tagscache object that contains various tags related
599 caches.'''
599 caches.'''
600
600
601 # This simplifies its cache management by having one decorated
601 # This simplifies its cache management by having one decorated
602 # function (this one) and the rest simply fetch things from it.
602 # function (this one) and the rest simply fetch things from it.
603 class tagscache(object):
603 class tagscache(object):
604 def __init__(self):
604 def __init__(self):
605 # These two define the set of tags for this repository. tags
605 # These two define the set of tags for this repository. tags
606 # maps tag name to node; tagtypes maps tag name to 'global' or
606 # maps tag name to node; tagtypes maps tag name to 'global' or
607 # 'local'. (Global tags are defined by .hgtags across all
607 # 'local'. (Global tags are defined by .hgtags across all
608 # heads, and local tags are defined in .hg/localtags.)
608 # heads, and local tags are defined in .hg/localtags.)
609 # They constitute the in-memory cache of tags.
609 # They constitute the in-memory cache of tags.
610 self.tags = self.tagtypes = None
610 self.tags = self.tagtypes = None
611
611
612 self.nodetagscache = self.tagslist = None
612 self.nodetagscache = self.tagslist = None
613
613
614 cache = tagscache()
614 cache = tagscache()
615 cache.tags, cache.tagtypes = self._findtags()
615 cache.tags, cache.tagtypes = self._findtags()
616
616
617 return cache
617 return cache
618
618
619 def tags(self):
619 def tags(self):
620 '''return a mapping of tag to node'''
620 '''return a mapping of tag to node'''
621 t = {}
621 t = {}
622 if self.changelog.filteredrevs:
622 if self.changelog.filteredrevs:
623 tags, tt = self._findtags()
623 tags, tt = self._findtags()
624 else:
624 else:
625 tags = self._tagscache.tags
625 tags = self._tagscache.tags
626 for k, v in tags.iteritems():
626 for k, v in tags.iteritems():
627 try:
627 try:
628 # ignore tags to unknown nodes
628 # ignore tags to unknown nodes
629 self.changelog.rev(v)
629 self.changelog.rev(v)
630 t[k] = v
630 t[k] = v
631 except (error.LookupError, ValueError):
631 except (error.LookupError, ValueError):
632 pass
632 pass
633 return t
633 return t
634
634
635 def _findtags(self):
635 def _findtags(self):
636 '''Do the hard work of finding tags. Return a pair of dicts
636 '''Do the hard work of finding tags. Return a pair of dicts
637 (tags, tagtypes) where tags maps tag name to node, and tagtypes
637 (tags, tagtypes) where tags maps tag name to node, and tagtypes
638 maps tag name to a string like \'global\' or \'local\'.
638 maps tag name to a string like \'global\' or \'local\'.
639 Subclasses or extensions are free to add their own tags, but
639 Subclasses or extensions are free to add their own tags, but
640 should be aware that the returned dicts will be retained for the
640 should be aware that the returned dicts will be retained for the
641 duration of the localrepo object.'''
641 duration of the localrepo object.'''
642
642
643 # XXX what tagtype should subclasses/extensions use? Currently
643 # XXX what tagtype should subclasses/extensions use? Currently
644 # mq and bookmarks add tags, but do not set the tagtype at all.
644 # mq and bookmarks add tags, but do not set the tagtype at all.
645 # Should each extension invent its own tag type? Should there
645 # Should each extension invent its own tag type? Should there
646 # be one tagtype for all such "virtual" tags? Or is the status
646 # be one tagtype for all such "virtual" tags? Or is the status
647 # quo fine?
647 # quo fine?
648
648
649 alltags = {} # map tag name to (node, hist)
649 alltags = {} # map tag name to (node, hist)
650 tagtypes = {}
650 tagtypes = {}
651
651
652 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
652 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
653 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
653 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
654
654
655 # Build the return dicts. Have to re-encode tag names because
655 # Build the return dicts. Have to re-encode tag names because
656 # the tags module always uses UTF-8 (in order not to lose info
656 # the tags module always uses UTF-8 (in order not to lose info
657 # writing to the cache), but the rest of Mercurial wants them in
657 # writing to the cache), but the rest of Mercurial wants them in
658 # local encoding.
658 # local encoding.
659 tags = {}
659 tags = {}
660 for (name, (node, hist)) in alltags.iteritems():
660 for (name, (node, hist)) in alltags.iteritems():
661 if node != nullid:
661 if node != nullid:
662 tags[encoding.tolocal(name)] = node
662 tags[encoding.tolocal(name)] = node
663 tags['tip'] = self.changelog.tip()
663 tags['tip'] = self.changelog.tip()
664 tagtypes = dict([(encoding.tolocal(name), value)
664 tagtypes = dict([(encoding.tolocal(name), value)
665 for (name, value) in tagtypes.iteritems()])
665 for (name, value) in tagtypes.iteritems()])
666 return (tags, tagtypes)
666 return (tags, tagtypes)
667
667
668 def tagtype(self, tagname):
668 def tagtype(self, tagname):
669 '''
669 '''
670 return the type of the given tag. result can be:
670 return the type of the given tag. result can be:
671
671
672 'local' : a local tag
672 'local' : a local tag
673 'global' : a global tag
673 'global' : a global tag
674 None : tag does not exist
674 None : tag does not exist
675 '''
675 '''
676
676
677 return self._tagscache.tagtypes.get(tagname)
677 return self._tagscache.tagtypes.get(tagname)
678
678
679 def tagslist(self):
679 def tagslist(self):
680 '''return a list of tags ordered by revision'''
680 '''return a list of tags ordered by revision'''
681 if not self._tagscache.tagslist:
681 if not self._tagscache.tagslist:
682 l = []
682 l = []
683 for t, n in self.tags().iteritems():
683 for t, n in self.tags().iteritems():
684 l.append((self.changelog.rev(n), t, n))
684 l.append((self.changelog.rev(n), t, n))
685 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
685 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
686
686
687 return self._tagscache.tagslist
687 return self._tagscache.tagslist
688
688
689 def nodetags(self, node):
689 def nodetags(self, node):
690 '''return the tags associated with a node'''
690 '''return the tags associated with a node'''
691 if not self._tagscache.nodetagscache:
691 if not self._tagscache.nodetagscache:
692 nodetagscache = {}
692 nodetagscache = {}
693 for t, n in self._tagscache.tags.iteritems():
693 for t, n in self._tagscache.tags.iteritems():
694 nodetagscache.setdefault(n, []).append(t)
694 nodetagscache.setdefault(n, []).append(t)
695 for tags in nodetagscache.itervalues():
695 for tags in nodetagscache.itervalues():
696 tags.sort()
696 tags.sort()
697 self._tagscache.nodetagscache = nodetagscache
697 self._tagscache.nodetagscache = nodetagscache
698 return self._tagscache.nodetagscache.get(node, [])
698 return self._tagscache.nodetagscache.get(node, [])
699
699
700 def nodebookmarks(self, node):
700 def nodebookmarks(self, node):
701 marks = []
701 marks = []
702 for bookmark, n in self._bookmarks.iteritems():
702 for bookmark, n in self._bookmarks.iteritems():
703 if n == node:
703 if n == node:
704 marks.append(bookmark)
704 marks.append(bookmark)
705 return sorted(marks)
705 return sorted(marks)
706
706
707 def branchmap(self):
707 def branchmap(self):
708 '''returns a dictionary {branch: [branchheads]} with branchheads
708 '''returns a dictionary {branch: [branchheads]} with branchheads
709 ordered by increasing revision number'''
709 ordered by increasing revision number'''
710 branchmap.updatecache(self)
710 branchmap.updatecache(self)
711 return self._branchcaches[self.filtername]
711 return self._branchcaches[self.filtername]
712
712
713 def branchtip(self, branch):
713 def branchtip(self, branch):
714 '''return the tip node for a given branch'''
714 '''return the tip node for a given branch'''
715 try:
715 try:
716 return self.branchmap().branchtip(branch)
716 return self.branchmap().branchtip(branch)
717 except KeyError:
717 except KeyError:
718 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
718 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
719
719
720 def lookup(self, key):
720 def lookup(self, key):
721 return self[key].node()
721 return self[key].node()
722
722
723 def lookupbranch(self, key, remote=None):
723 def lookupbranch(self, key, remote=None):
724 repo = remote or self
724 repo = remote or self
725 if key in repo.branchmap():
725 if key in repo.branchmap():
726 return key
726 return key
727
727
728 repo = (remote and remote.local()) and remote or self
728 repo = (remote and remote.local()) and remote or self
729 return repo[key].branch()
729 return repo[key].branch()
730
730
731 def known(self, nodes):
731 def known(self, nodes):
732 nm = self.changelog.nodemap
732 nm = self.changelog.nodemap
733 pc = self._phasecache
733 pc = self._phasecache
734 result = []
734 result = []
735 for n in nodes:
735 for n in nodes:
736 r = nm.get(n)
736 r = nm.get(n)
737 resp = not (r is None or pc.phase(self, r) >= phases.secret)
737 resp = not (r is None or pc.phase(self, r) >= phases.secret)
738 result.append(resp)
738 result.append(resp)
739 return result
739 return result
740
740
741 def local(self):
741 def local(self):
742 return self
742 return self
743
743
744 def cancopy(self):
744 def cancopy(self):
745 # so statichttprepo's override of local() works
745 # so statichttprepo's override of local() works
746 if not self.local():
746 if not self.local():
747 return False
747 return False
748 if not self.ui.configbool('phases', 'publish', True):
748 if not self.ui.configbool('phases', 'publish', True):
749 return True
749 return True
750 # if publishing we can't copy if there is filtered content
750 # if publishing we can't copy if there is filtered content
751 return not self.filtered('visible').changelog.filteredrevs
751 return not self.filtered('visible').changelog.filteredrevs
752
752
753 def join(self, f, *insidef):
753 def join(self, f, *insidef):
754 return os.path.join(self.path, f, *insidef)
754 return os.path.join(self.path, f, *insidef)
755
755
756 def wjoin(self, f, *insidef):
756 def wjoin(self, f, *insidef):
757 return os.path.join(self.root, f, *insidef)
757 return os.path.join(self.root, f, *insidef)
758
758
759 def file(self, f):
759 def file(self, f):
760 if f[0] == '/':
760 if f[0] == '/':
761 f = f[1:]
761 f = f[1:]
762 return filelog.filelog(self.sopener, f)
762 return filelog.filelog(self.sopener, f)
763
763
764 def changectx(self, changeid):
764 def changectx(self, changeid):
765 return self[changeid]
765 return self[changeid]
766
766
767 def parents(self, changeid=None):
767 def parents(self, changeid=None):
768 '''get list of changectxs for parents of changeid'''
768 '''get list of changectxs for parents of changeid'''
769 return self[changeid].parents()
769 return self[changeid].parents()
770
770
771 def setparents(self, p1, p2=nullid):
771 def setparents(self, p1, p2=nullid):
772 self.dirstate.beginparentchange()
772 self.dirstate.beginparentchange()
773 copies = self.dirstate.setparents(p1, p2)
773 copies = self.dirstate.setparents(p1, p2)
774 pctx = self[p1]
774 pctx = self[p1]
775 if copies:
775 if copies:
776 # Adjust copy records, the dirstate cannot do it, it
776 # Adjust copy records, the dirstate cannot do it, it
777 # requires access to parents manifests. Preserve them
777 # requires access to parents manifests. Preserve them
778 # only for entries added to first parent.
778 # only for entries added to first parent.
779 for f in copies:
779 for f in copies:
780 if f not in pctx and copies[f] in pctx:
780 if f not in pctx and copies[f] in pctx:
781 self.dirstate.copy(copies[f], f)
781 self.dirstate.copy(copies[f], f)
782 if p2 == nullid:
782 if p2 == nullid:
783 for f, s in sorted(self.dirstate.copies().items()):
783 for f, s in sorted(self.dirstate.copies().items()):
784 if f not in pctx and s not in pctx:
784 if f not in pctx and s not in pctx:
785 self.dirstate.copy(None, f)
785 self.dirstate.copy(None, f)
786 self.dirstate.endparentchange()
786 self.dirstate.endparentchange()
787
787
788 def filectx(self, path, changeid=None, fileid=None):
788 def filectx(self, path, changeid=None, fileid=None):
789 """changeid can be a changeset revision, node, or tag.
789 """changeid can be a changeset revision, node, or tag.
790 fileid can be a file revision or node."""
790 fileid can be a file revision or node."""
791 return context.filectx(self, path, changeid, fileid)
791 return context.filectx(self, path, changeid, fileid)
792
792
793 def getcwd(self):
793 def getcwd(self):
794 return self.dirstate.getcwd()
794 return self.dirstate.getcwd()
795
795
796 def pathto(self, f, cwd=None):
796 def pathto(self, f, cwd=None):
797 return self.dirstate.pathto(f, cwd)
797 return self.dirstate.pathto(f, cwd)
798
798
799 def wfile(self, f, mode='r'):
799 def wfile(self, f, mode='r'):
800 return self.wopener(f, mode)
800 return self.wopener(f, mode)
801
801
802 def _link(self, f):
802 def _link(self, f):
803 return self.wvfs.islink(f)
803 return self.wvfs.islink(f)
804
804
805 def _loadfilter(self, filter):
805 def _loadfilter(self, filter):
806 if filter not in self.filterpats:
806 if filter not in self.filterpats:
807 l = []
807 l = []
808 for pat, cmd in self.ui.configitems(filter):
808 for pat, cmd in self.ui.configitems(filter):
809 if cmd == '!':
809 if cmd == '!':
810 continue
810 continue
811 mf = matchmod.match(self.root, '', [pat])
811 mf = matchmod.match(self.root, '', [pat])
812 fn = None
812 fn = None
813 params = cmd
813 params = cmd
814 for name, filterfn in self._datafilters.iteritems():
814 for name, filterfn in self._datafilters.iteritems():
815 if cmd.startswith(name):
815 if cmd.startswith(name):
816 fn = filterfn
816 fn = filterfn
817 params = cmd[len(name):].lstrip()
817 params = cmd[len(name):].lstrip()
818 break
818 break
819 if not fn:
819 if not fn:
820 fn = lambda s, c, **kwargs: util.filter(s, c)
820 fn = lambda s, c, **kwargs: util.filter(s, c)
821 # Wrap old filters not supporting keyword arguments
821 # Wrap old filters not supporting keyword arguments
822 if not inspect.getargspec(fn)[2]:
822 if not inspect.getargspec(fn)[2]:
823 oldfn = fn
823 oldfn = fn
824 fn = lambda s, c, **kwargs: oldfn(s, c)
824 fn = lambda s, c, **kwargs: oldfn(s, c)
825 l.append((mf, fn, params))
825 l.append((mf, fn, params))
826 self.filterpats[filter] = l
826 self.filterpats[filter] = l
827 return self.filterpats[filter]
827 return self.filterpats[filter]
828
828
829 def _filter(self, filterpats, filename, data):
829 def _filter(self, filterpats, filename, data):
830 for mf, fn, cmd in filterpats:
830 for mf, fn, cmd in filterpats:
831 if mf(filename):
831 if mf(filename):
832 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
832 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
833 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
833 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
834 break
834 break
835
835
836 return data
836 return data
837
837
838 @unfilteredpropertycache
838 @unfilteredpropertycache
839 def _encodefilterpats(self):
839 def _encodefilterpats(self):
840 return self._loadfilter('encode')
840 return self._loadfilter('encode')
841
841
842 @unfilteredpropertycache
842 @unfilteredpropertycache
843 def _decodefilterpats(self):
843 def _decodefilterpats(self):
844 return self._loadfilter('decode')
844 return self._loadfilter('decode')
845
845
846 def adddatafilter(self, name, filter):
846 def adddatafilter(self, name, filter):
847 self._datafilters[name] = filter
847 self._datafilters[name] = filter
848
848
849 def wread(self, filename):
849 def wread(self, filename):
850 if self._link(filename):
850 if self._link(filename):
851 data = self.wvfs.readlink(filename)
851 data = self.wvfs.readlink(filename)
852 else:
852 else:
853 data = self.wopener.read(filename)
853 data = self.wopener.read(filename)
854 return self._filter(self._encodefilterpats, filename, data)
854 return self._filter(self._encodefilterpats, filename, data)
855
855
856 def wwrite(self, filename, data, flags):
856 def wwrite(self, filename, data, flags):
857 data = self._filter(self._decodefilterpats, filename, data)
857 data = self._filter(self._decodefilterpats, filename, data)
858 if 'l' in flags:
858 if 'l' in flags:
859 self.wopener.symlink(data, filename)
859 self.wopener.symlink(data, filename)
860 else:
860 else:
861 self.wopener.write(filename, data)
861 self.wopener.write(filename, data)
862 if 'x' in flags:
862 if 'x' in flags:
863 self.wvfs.setflags(filename, False, True)
863 self.wvfs.setflags(filename, False, True)
864
864
865 def wwritedata(self, filename, data):
865 def wwritedata(self, filename, data):
866 return self._filter(self._decodefilterpats, filename, data)
866 return self._filter(self._decodefilterpats, filename, data)
867
867
    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        vfsmap = {'plain': self.opener} # root of .hg/
        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

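For orientation, a minimal sketch of the calling pattern this method expects,
the same shape used by commitctx() and stream_in() later in this file
(`repo` stands for a localrepository instance):

    lock = repo.lock()
    try:
        tr = repo.transaction('some-operation')
        try:
            # ... write data to the store through tr ...
            tr.close()    # commit: journal files become undo files
        finally:
            tr.release()  # rolls back if close() was never reached
    finally:
        lock.release()
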
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

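The rename list built in transaction() pairs every journal file above with
its undoname() counterpart, so a successfully closed transaction leaves
behind the undo files that rollback() consumes. An illustrative mapping,
assuming the default layout:

    # .hg/store/journal            -> .hg/store/undo
    # .hg/journal.dirstate         -> .hg/undo.dirstate
    # .hg/store/journal.phaseroots -> .hg/store/undo.phaseroots
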
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

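A worked example of the 'undo.desc' parsing above: _writejournal() records
"%d\n%s\n" % (len(self), desc), so a "commit" transaction in a repository
that had 42 revisions leaves an undo.desc containing

    42
    commit

from which _rollback() computes oldlen = 42 and reports oldtip = 41 as the
revision the tip rolls back to.
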
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

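The timeout consulted above is the standard ui.timeout setting; a
hypothetical configuration that waits five minutes instead of the
600-second default:

    [ui]
    timeout = 300
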
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

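When both locks are needed, wlock must be taken before lock, which is
exactly what rollback() above does; taking them in the opposite order can
deadlock against another process. A minimal sketch of the convention
(`repo` is a localrepository):

    wlock = repo.wlock()
    lock = repo.lock()        # only after wlock is held
    try:
        pass                  # ... modify store and .hg metadata ...
    finally:
        release(lock, wlock)  # release in reverse order of acquisition
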
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

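To make the copy-tracing branch above concrete: committing 'bar' as a
rename of 'foo' ends up storing the copy source in the new filelog
revision's metadata and nulling the first parent, roughly as follows (the
node value here is hypothetical):

    meta = {'copy': 'foo',
            'copyrev': '1e4e1b8f71e05681d422154f5421e385fec3454f'}
    fparent1, fparent2 = nullid, newfparent
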
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, status)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

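A hypothetical caller, for orientation; commit() returns the node of the
new changeset, or None when there was nothing to commit:

    node = repo.commit(text='fix parser crash',
                       user='alice <alice@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')
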
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

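between() samples each top-to-bottom chain at exponentially growing
distances: with f doubling on every hit of the i == f test, a node is
recorded 1, 2, 4, 8, ... steps below top. A worked example on a linear
history (revision numbers hypothetical):

    # top = rev 100, bottom = rev 90
    # appended at i == 1, 2, 4, 8  ->  the nodes of revs 99, 98, 96, 92
    # the walk then stops when it reaches rev 90
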
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            # new format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

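For reference, a sketch of the stream wire format parsed above, with
illustrative file names and sizes: a status-code line, a
'total_files total_bytes' line, then for each file its name and size
separated by a NUL byte, followed by exactly that many bytes of data:

    0
    2 45
    data/foo.i<NUL>34
    ... 34 bytes of revlog data ...
    00changelog.i<NUL>11
    ... 11 bytes of revlog data ...
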
1718 def clone(self, remote, heads=[], stream=False):
1719 def clone(self, remote, heads=[], stream=False):
1719 '''clone remote repository.
1720 '''clone remote repository.
1720
1721
1721 keyword arguments:
1722 keyword arguments:
1722 heads: list of revs to clone (forces use of pull)
1723 heads: list of revs to clone (forces use of pull)
1723 stream: use streaming clone if possible'''
1724 stream: use streaming clone if possible'''
1724
1725
1725 # now, all clients that can request uncompressed clones can
1726 # now, all clients that can request uncompressed clones can
1726 # read repo formats supported by all servers that can serve
1727 # read repo formats supported by all servers that can serve
1727 # them.
1728 # them.
1728
1729
1729 # if revlog format changes, client will have to check version
1730 # if revlog format changes, client will have to check version
1730 # and format flags on "stream" capability, and use
1731 # and format flags on "stream" capability, and use
1731 # uncompressed only if compatible.
1732 # uncompressed only if compatible.
1732
1733
1733 if not stream:
1734 if not stream:
1734 # if the server explicitly prefers to stream (for fast LANs)
1735 # if the server explicitly prefers to stream (for fast LANs)
1735 stream = remote.capable('stream-preferred')
1736 stream = remote.capable('stream-preferred')
1736
1737
1737 if stream and not heads:
1738 if stream and not heads:
1738 # 'stream' means remote revlog format is revlogv1 only
1739 # 'stream' means remote revlog format is revlogv1 only
1739 if remote.capable('stream'):
1740 if remote.capable('stream'):
1740 self.stream_in(remote, set(('revlogv1',)))
1741 self.stream_in(remote, set(('revlogv1',)))
1741 else:
1742 else:
1742 # otherwise, 'streamreqs' contains the remote revlog format
1743 # otherwise, 'streamreqs' contains the remote revlog format
1743 streamreqs = remote.capable('streamreqs')
1744 streamreqs = remote.capable('streamreqs')
1744 if streamreqs:
1745 if streamreqs:
1745 streamreqs = set(streamreqs.split(','))
1746 streamreqs = set(streamreqs.split(','))
1746 # if we support it, stream in and adjust our requirements
1747 # if we support it, stream in and adjust our requirements
1747 if not streamreqs - self.supportedformats:
1748 if not streamreqs - self.supportedformats:
1748 self.stream_in(remote, streamreqs)
1749 self.stream_in(remote, streamreqs)
1749
1750
1750 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1751 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1751 try:
1752 try:
1752 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1753 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1753 ret = exchange.pull(self, remote, heads).cgresult
1754 ret = exchange.pull(self, remote, heads).cgresult
1754 finally:
1755 finally:
1755 self.ui.restoreconfig(quiet)
1756 self.ui.restoreconfig(quiet)
1756 return ret
1757 return ret
1757
1758
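The requirement negotiation above is plain set arithmetic: streaming is only safe when every requirement announced by the remote is supported locally. A minimal sketch of that check, with illustrative values:

    # hypothetical values; 'streamreqs' arrives as a comma-separated string
    supportedformats = set(['revlogv1', 'generaldelta'])
    streamreqs = set('revlogv1,generaldelta'.split(','))
    if not streamreqs - supportedformats:
        print 'safe to stream: all remote requirements are supported'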
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
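pushkey() and listkeys() wrap the pushkey protocol with pre/post hooks. A hedged usage sketch (namespace and values illustrative; 'bookmarks' maps bookmark names to hex nodes, and newhex is assumed to be defined):

    marks = repo.listkeys('bookmarks')    # e.g. {'stable': '5bd1f657...'}
    # push a bookmark move; old value '' means the key was previously unset
    ok = repo.pushkey('bookmarks', 'stable', '', newhex)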
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
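aftertrans() returns a plain closure that captures only the (vfs, src, dest) tuples, never the repository itself, so it can serve as the transaction's `after` callback without creating a reference cycle. A sketch of the intended wiring, assuming a `vfs` and a `ui` object already exist (paths illustrative):

    renames = [(vfs, 'journal.dirstate', 'undo.dirstate')]  # illustrative
    tr = transaction.transaction(ui.warn, vfs, {'store': vfs}, 'journal',
                                 after=aftertrans(renames))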
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

@@ -1,457 +1,462 b''
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import errno
import error, util

version = 0

def active(func):
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active

def _playback(journal, report, opener, entries, backupentries, unlink=True):
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError), inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if f and b:
            filepath = opener.join(f)
            backuppath = opener.join(b)
            try:
                util.copyfile(backuppath, filepath)
                backupfiles.append(b)
            except IOError:
                report(_("failed to recover %s\n") % f)
                raise
        else:
            target = f or b
            try:
                opener.unlink(target)
            except (IOError, OSError), inst:
                if inst.errno != errno.ENOENT:
                    raise

    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    for f in backupfiles:
        if opener.exists(f):
            opener.unlink(f)

class transaction(object):
-    def __init__(self, report, opener, journal, after=None, createmode=None,
-                 onclose=None, onabort=None):
+    def __init__(self, report, opener, vfsmap, journal, after=None,
+                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the
        event of an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
          closed
        * `onabort`: called as the transaction is aborting, but before any
          files have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
+        # a vfs to the store content
        self.opener = opener
+        # a map to access files in various locations {location -> vfs}
+        vfsmap = vfsmap.copy()
+        vfsmap[''] = opener  # set default value
+        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # if 'backuppath' is empty, no file existed at backup time
        # if 'path' is empty, this is a temporary transaction file
        # (location and cache are currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # hold callbacks to call when writing the transaction
        self._finalizecallback = {}
        # hold callbacks for post transaction close
        self._postclosecallback = {}
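The new `vfsmap` argument is the point of this changeset: it gives the transaction a way to resolve files in locations other than the store, with the empty location '' falling back to `opener`. A minimal construction sketch under the new signature (the `scmutil.vfs` helper and `ui` object are assumptions; any vfs-like object and report callable would do):

    storevfs = scmutil.vfs('.hg/store')   # assumed vfs instance
    tr = transaction.transaction(ui.warn, storevfs, {'store': storevfs},
                                 'journal')
    tr.add('00changelog.i', 0)            # record truncation offset for abort
    tr.close()                            # commit; journal files are removed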
    def __del__(self):
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)
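startgroup()/endgroup() queue add() calls so that a reader of the journal sees a multi-file operation either entirely or not at all. A short sketch (paths and offsets illustrative):

    tr.startgroup()
    tr.add('data/a.i', 1024)   # queued in memory, not yet journalled
    tr.add('data/b.i', 2048)
    tr.endgroup()              # both entries hit the journal together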
    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()
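Each entry is journalled as a 'file\0offset' line, which is exactly enough information to truncate the file back to its pre-transaction size on abort. For example (values illustrative):

    line = "%s\0%d\n" % ('00changelog.i', 12345)
    assert line == '00changelog.i\x0012345\n'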
    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry(('', file, backupfile, False))
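Unlike add(), which records a truncation offset for append-only files, addbackup() copies (or hardlinks) the whole file next to the journal so a rewritten file can be restored on abort. The backup name follows the journal name, e.g.:

    tr.addbackup('phaseroots')
    # with journal == 'journal', the copy lands at 'journal.backup.phaseroots'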
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry(('', '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom
        # vfs, except for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)
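A hedged sketch of registering a file generator (content illustrative; 'bookmarks' is the one filename currently allowed with a custom vfs):

    def writemarks(fp):
        fp.write('5bd1f657... stable\n')   # illustrative bookmark record
    tr.addfilegenerator('bookmarks', ('bookmarks',), writemarks, order=0)
    # at close(), the transaction opens 'bookmarks' atomically and calls
    # writemarks(fp); note 'bookmarks' is exempted from automatic backup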
    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as the callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending files to temporary versions

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        return self._anypending
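A sketch of the pending mechanism, which lets pre-commit hooks inspect data that is written but not yet committed (the callback name and behaviour are illustrative):

    def flushpending(tr):
        # write in-memory changelog data to a pending file here
        return True                     # report that pending data exists
    tr.addpending('changelog', flushpending)
    if tr.writepending():               # True: hooks should read pending files
        pass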
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as the callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks
        with newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as the callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback
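The distinction between the two: finalize callbacks run inside close() while the journal still protects their writes; postclose callbacks run only after the transaction has fully committed. Illustrative registrations (writecaches/warmcaches are hypothetical helpers):

    tr.addfinalize('write-caches', lambda tr: writecaches(tr))  # still covered
    tr.addpostclose('warm-caches', lambda tr: warmcaches())     # after commit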
    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            if self.onclose is not None:
                self.onclose()

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for _l, f, b, _c in self._backupentries:
            if not f and b and self.opener.exists(b):
                self.opener.unlink(b)
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
            for _l, _f, b, _c in self._backupentries:
                if b and self.opener.exists(b):
                    self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None


def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
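Concretely, an interrupted transaction leaves behind files like the following (contents illustrative), which rollback() parses back into `entries` and `backupentries` before handing them to _playback():

    journal:               # 'file\0offset' lines -> truncation targets
        00changelog.i\x001234
        00manifest.i\x00987
    journal.backupfiles:   # version line, then 'location\0file\0backup\0cache'
        0
        \x00phaseroots\x00journal.backup.phaseroots\x000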