streamclone: move applystreamclone() from localrepo.py...
Gregory Szorc
r26441:56527b88 default
@@ -0,0 +1,64 @@ mercurial/streamclone.py (new file)
# streamclone.py - producing and consuming streaming repository data
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from . import (
    branchmap,
    exchange,
)

def applyremotedata(repo, remotereqs, remotebranchmap, fp):
    """Apply stream clone data to a repository.

    "remotereqs" is a set of requirements to handle the incoming data.
    "remotebranchmap" is the result of a branchmap lookup on the remote. It
    can be None.
    "fp" is a file object containing the raw stream data, suitable for
    feeding into exchange.consumestreamclone.
    """
    lock = repo.lock()
    try:
        exchange.consumestreamclone(repo, fp)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        #                    (requirements from the streamed-in repository)
        repo.requirements = remotereqs | (
                repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if remotebranchmap:
            rbheads = []
            closed = []
            for bheads in remotebranchmap.itervalues():
                rbheads.extend(bheads)
                for h in bheads:
                    r = repo.changelog.rev(h)
                    b, c = repo.changelog.branchinfo(r)
                    if c:
                        closed.append(h)

            if rbheads:
                rtiprev = max((int(repo.changelog.rev(node))
                               for node in rbheads))
                cache = branchmap.branchcache(remotebranchmap,
                                              repo[rtiprev].node(),
                                              rtiprev,
                                              closednodes=closed)
                # Try to stick it as low as possible
                # filters above "served" are unlikely to be fetched from a clone
                for candidate in ('base', 'immutable', 'served'):
                    rview = repo.filtered(candidate)
                    if cache.validfor(rview):
                        repo._branchcaches[candidate] = cache
                        cache.write(rview)
                        break
        repo.invalidate()
    finally:
        lock.release()
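
Below is a hypothetical usage sketch, not part of the changeset itself. It assumes the caller (for example the clone code path) has already negotiated a stream clone with the server and holds the remote requirements, the remote branchmap and the raw data stream; the helper name finishstreamclone and all example values are made up for illustration, only the applyremotedata() call is the API introduced above.

from mercurial import streamclone

def finishstreamclone(repo, remotereqs, remotebranchmap, fp):
    # applyremotedata() takes the repo lock, feeds the raw stream to
    # exchange.consumestreamclone(), rewrites .hg/requires and seeds the
    # branch head cache from the remote branchmap.
    streamclone.applyremotedata(repo, remotereqs, remotebranchmap, fp)

# The .hg/requires rewrite is plain set arithmetic. With made-up values:
supportedformats = set(['revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'])
localreqs = set(['revlogv1', 'store', 'fncache', 'dotencode', 'shared'])
remotereqs = set(['revlogv1', 'generaldelta', 'store', 'fncache', 'dotencode'])

# new requirements = remote requirements
#                    + local requirements that are not format requirements
newreqs = remotereqs | (localreqs - supportedformats)
print sorted(newreqs)
# ['dotencode', 'fncache', 'generaldelta', 'revlogv1', 'shared', 'store']

Note how 'shared', a local requirement that is not a repository format, survives the clone, while the format requirements now come from the streamed-in repository.
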
@@ -1,1971 +1,1921 @@ mercurial/localrepo.py
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, wdirrev, short
 from i18n import _
 import urllib
 import peer, changegroup, subrepo, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
 import transaction, store, encoding, exchange, bundle2
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect, random
 import branchmap, pathutil
 import namespaces
+import streamclone
 propertycache = util.propertycache
 filecache = scmutil.filecache

25 class repofilecache(filecache):
26 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """All filecache usage on repo are done for logic that should be unfiltered
27 """
28 """
28
29
29 def __get__(self, repo, type=None):
30 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
32 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
34 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 return super(repofilecache, self).__delete__(repo.unfiltered())
35
36
36 class storecache(repofilecache):
37 class storecache(repofilecache):
37 """filecache for files in the store"""
38 """filecache for files in the store"""
38 def join(self, obj, fname):
39 def join(self, obj, fname):
39 return obj.sjoin(fname)
40 return obj.sjoin(fname)
40
41
41 class unfilteredpropertycache(propertycache):
42 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
43 """propertycache that apply to unfiltered repo only"""
43
44
44 def __get__(self, repo, type=None):
45 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
46 unfi = repo.unfiltered()
46 if unfi is repo:
47 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
49 return getattr(unfi, self.name)
49
50
50 class filteredpropertycache(propertycache):
51 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
52 """propertycache that must take filtering in account"""
52
53
53 def cachevalue(self, obj, value):
54 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
55 object.__setattr__(obj, self.name, value)
55
56
56
57
57 def hasunfilteredcache(repo, name):
58 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
60 return name in vars(repo.unfiltered())
60
61
61 def unfilteredmethod(orig):
62 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
63 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
64 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
66 return wrapper
66
67
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
69 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
71
71 class localpeer(peer.peerrepository):
72 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
73 '''peer for a local repo; reflects only the most recent API'''
73
74
74 def __init__(self, repo, caps=moderncaps):
75 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
76 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
77 self._repo = repo.filtered('served')
77 self.ui = repo.ui
78 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
79 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
80 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
81 self.supportedformats = repo.supportedformats
81
82
82 def close(self):
83 def close(self):
83 self._repo.close()
84 self._repo.close()
84
85
85 def _capabilities(self):
86 def _capabilities(self):
86 return self._caps
87 return self._caps
87
88
88 def local(self):
89 def local(self):
89 return self._repo
90 return self._repo
90
91
91 def canpush(self):
92 def canpush(self):
92 return True
93 return True
93
94
94 def url(self):
95 def url(self):
95 return self._repo.url()
96 return self._repo.url()
96
97
97 def lookup(self, key):
98 def lookup(self, key):
98 return self._repo.lookup(key)
99 return self._repo.lookup(key)
99
100
100 def branchmap(self):
101 def branchmap(self):
101 return self._repo.branchmap()
102 return self._repo.branchmap()
102
103
103 def heads(self):
104 def heads(self):
104 return self._repo.heads()
105 return self._repo.heads()
105
106
106 def known(self, nodes):
107 def known(self, nodes):
107 return self._repo.known(nodes)
108 return self._repo.known(nodes)
108
109
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
111 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
116 # wire level function happier. We need to build a proper object
116 # from it in local peer.
117 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
118 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
119 return cg
119
120
120 # TODO We might want to move the next two calls into legacypeer and add
121 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
122 # unbundle instead.
122
123
123 def unbundle(self, cg, heads, url):
124 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
125 """apply a bundle on a repo
125
126
126 This function handles the repo locking itself."""
127 This function handles the repo locking itself."""
127 try:
128 try:
128 try:
129 try:
129 cg = exchange.readbundle(self.ui, cg, None)
130 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
132 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
133 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
134 # This little dance should be dropped eventually when the
134 # API is finally improved.
135 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
136 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
137 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
138 return ret
138 except Exception as exc:
139 except Exception as exc:
139 # If the exception contains output salvaged from a bundle2
140 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
141 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
142 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
143 # it directly.
143 #
144 #
144 # This is not very elegant but allows a "simple" solution for
145 # This is not very elegant but allows a "simple" solution for
145 # issue4594
146 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
148 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
149 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
150 for out in output:
150 bundler.addpart(out)
151 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
152 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
153 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
154 bundle2.processbundle(self._repo, b)
154 raise
155 raise
155 except error.PushRaced as exc:
156 except error.PushRaced as exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
157 raise error.ResponseError(_('push failed:'), str(exc))
157
158
158 def lock(self):
159 def lock(self):
159 return self._repo.lock()
160 return self._repo.lock()
160
161
161 def addchangegroup(self, cg, source, url):
162 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163 return changegroup.addchangegroup(self._repo, cg, source, url)
163
164
164 def pushkey(self, namespace, key, old, new):
165 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
166 return self._repo.pushkey(namespace, key, old, new)
166
167
167 def listkeys(self, namespace):
168 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
169 return self._repo.listkeys(namespace)
169
170
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
172 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 return "%s %s %s %s %s" % (one, two, three, four, five)
173
174
174 class locallegacypeer(localpeer):
175 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
176 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
177 restricted capabilities'''
177
178
178 def __init__(self, repo):
179 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
180 localpeer.__init__(self, repo, caps=legacycaps)
180
181
181 def branches(self, nodes):
182 def branches(self, nodes):
182 return self._repo.branches(nodes)
183 return self._repo.branches(nodes)
183
184
184 def between(self, pairs):
185 def between(self, pairs):
185 return self._repo.between(pairs)
186 return self._repo.between(pairs)
186
187
187 def changegroup(self, basenodes, source):
188 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
189 return changegroup.changegroup(self._repo, basenodes, source)
189
190
190 def changegroupsubset(self, bases, heads, source):
191 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
193
193 class localrepository(object):
194 class localrepository(object):
194
195
195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 'manifestv2'))
197 'manifestv2'))
197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 'dotencode'))
199 'dotencode'))
199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 filtername = None
201 filtername = None
201
202
202 # a list of (ui, featureset) functions.
203 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
204 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
205 featuresetupfuncs = set()
205
206
206 def _baserequirements(self, create):
207 def _baserequirements(self, create):
207 return ['revlogv1']
208 return ['revlogv1']
208
209
209 def __init__(self, baseui, path=None, create=False):
210 def __init__(self, baseui, path=None, create=False):
210 self.requirements = set()
211 self.requirements = set()
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 self.wopener = self.wvfs
213 self.wopener = self.wvfs
213 self.root = self.wvfs.base
214 self.root = self.wvfs.base
214 self.path = self.wvfs.join(".hg")
215 self.path = self.wvfs.join(".hg")
215 self.origroot = path
216 self.origroot = path
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 self.vfs = scmutil.vfs(self.path)
218 self.vfs = scmutil.vfs(self.path)
218 self.opener = self.vfs
219 self.opener = self.vfs
219 self.baseui = baseui
220 self.baseui = baseui
220 self.ui = baseui.copy()
221 self.ui = baseui.copy()
221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 self.ui.copy = baseui.copy # prevent copying repo configuration
222 # A list of callback to shape the phase if no data were found.
223 # A list of callback to shape the phase if no data were found.
223 # Callback are in the form: func(repo, roots) --> processed root.
224 # Callback are in the form: func(repo, roots) --> processed root.
224 # This list it to be filled by extension during repo setup
225 # This list it to be filled by extension during repo setup
225 self._phasedefaults = []
226 self._phasedefaults = []
226 try:
227 try:
227 self.ui.readconfig(self.join("hgrc"), self.root)
228 self.ui.readconfig(self.join("hgrc"), self.root)
228 extensions.loadall(self.ui)
229 extensions.loadall(self.ui)
229 except IOError:
230 except IOError:
230 pass
231 pass
231
232
232 if self.featuresetupfuncs:
233 if self.featuresetupfuncs:
233 self.supported = set(self._basesupported) # use private copy
234 self.supported = set(self._basesupported) # use private copy
234 extmods = set(m.__name__ for n, m
235 extmods = set(m.__name__ for n, m
235 in extensions.extensions(self.ui))
236 in extensions.extensions(self.ui))
236 for setupfunc in self.featuresetupfuncs:
237 for setupfunc in self.featuresetupfuncs:
237 if setupfunc.__module__ in extmods:
238 if setupfunc.__module__ in extmods:
238 setupfunc(self.ui, self.supported)
239 setupfunc(self.ui, self.supported)
239 else:
240 else:
240 self.supported = self._basesupported
241 self.supported = self._basesupported
241
242
242 if not self.vfs.isdir():
243 if not self.vfs.isdir():
243 if create:
244 if create:
244 if not self.wvfs.exists():
245 if not self.wvfs.exists():
245 self.wvfs.makedirs()
246 self.wvfs.makedirs()
246 self.vfs.makedir(notindexed=True)
247 self.vfs.makedir(notindexed=True)
247 self.requirements.update(self._baserequirements(create))
248 self.requirements.update(self._baserequirements(create))
248 if self.ui.configbool('format', 'usestore', True):
249 if self.ui.configbool('format', 'usestore', True):
249 self.vfs.mkdir("store")
250 self.vfs.mkdir("store")
250 self.requirements.add("store")
251 self.requirements.add("store")
251 if self.ui.configbool('format', 'usefncache', True):
252 if self.ui.configbool('format', 'usefncache', True):
252 self.requirements.add("fncache")
253 self.requirements.add("fncache")
253 if self.ui.configbool('format', 'dotencode', True):
254 if self.ui.configbool('format', 'dotencode', True):
254 self.requirements.add('dotencode')
255 self.requirements.add('dotencode')
255 # create an invalid changelog
256 # create an invalid changelog
256 self.vfs.append(
257 self.vfs.append(
257 "00changelog.i",
258 "00changelog.i",
258 '\0\0\0\2' # represents revlogv2
259 '\0\0\0\2' # represents revlogv2
259 ' dummy changelog to prevent using the old repo layout'
260 ' dummy changelog to prevent using the old repo layout'
260 )
261 )
261 # experimental config: format.generaldelta
262 # experimental config: format.generaldelta
262 if self.ui.configbool('format', 'generaldelta', False):
263 if self.ui.configbool('format', 'generaldelta', False):
263 self.requirements.add("generaldelta")
264 self.requirements.add("generaldelta")
264 if self.ui.configbool('experimental', 'treemanifest', False):
265 if self.ui.configbool('experimental', 'treemanifest', False):
265 self.requirements.add("treemanifest")
266 self.requirements.add("treemanifest")
266 if self.ui.configbool('experimental', 'manifestv2', False):
267 if self.ui.configbool('experimental', 'manifestv2', False):
267 self.requirements.add("manifestv2")
268 self.requirements.add("manifestv2")
268 else:
269 else:
269 raise error.RepoError(_("repository %s not found") % path)
270 raise error.RepoError(_("repository %s not found") % path)
270 elif create:
271 elif create:
271 raise error.RepoError(_("repository %s already exists") % path)
272 raise error.RepoError(_("repository %s already exists") % path)
272 else:
273 else:
273 try:
274 try:
274 self.requirements = scmutil.readrequires(
275 self.requirements = scmutil.readrequires(
275 self.vfs, self.supported)
276 self.vfs, self.supported)
276 except IOError as inst:
277 except IOError as inst:
277 if inst.errno != errno.ENOENT:
278 if inst.errno != errno.ENOENT:
278 raise
279 raise
279
280
280 self.sharedpath = self.path
281 self.sharedpath = self.path
281 try:
282 try:
282 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
283 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
283 realpath=True)
284 realpath=True)
284 s = vfs.base
285 s = vfs.base
285 if not vfs.exists():
286 if not vfs.exists():
286 raise error.RepoError(
287 raise error.RepoError(
287 _('.hg/sharedpath points to nonexistent directory %s') % s)
288 _('.hg/sharedpath points to nonexistent directory %s') % s)
288 self.sharedpath = s
289 self.sharedpath = s
289 except IOError as inst:
290 except IOError as inst:
290 if inst.errno != errno.ENOENT:
291 if inst.errno != errno.ENOENT:
291 raise
292 raise
292
293
293 self.store = store.store(
294 self.store = store.store(
294 self.requirements, self.sharedpath, scmutil.vfs)
295 self.requirements, self.sharedpath, scmutil.vfs)
295 self.spath = self.store.path
296 self.spath = self.store.path
296 self.svfs = self.store.vfs
297 self.svfs = self.store.vfs
297 self.sjoin = self.store.join
298 self.sjoin = self.store.join
298 self.vfs.createmode = self.store.createmode
299 self.vfs.createmode = self.store.createmode
299 self._applyopenerreqs()
300 self._applyopenerreqs()
300 if create:
301 if create:
301 self._writerequirements()
302 self._writerequirements()
302
303
303 self._dirstatevalidatewarned = False
304 self._dirstatevalidatewarned = False
304
305
305 self._branchcaches = {}
306 self._branchcaches = {}
306 self._revbranchcache = None
307 self._revbranchcache = None
307 self.filterpats = {}
308 self.filterpats = {}
308 self._datafilters = {}
309 self._datafilters = {}
309 self._transref = self._lockref = self._wlockref = None
310 self._transref = self._lockref = self._wlockref = None
310
311
311 # A cache for various files under .hg/ that tracks file changes,
312 # A cache for various files under .hg/ that tracks file changes,
312 # (used by the filecache decorator)
313 # (used by the filecache decorator)
313 #
314 #
314 # Maps a property name to its util.filecacheentry
315 # Maps a property name to its util.filecacheentry
315 self._filecache = {}
316 self._filecache = {}
316
317
317 # hold sets of revision to be filtered
318 # hold sets of revision to be filtered
318 # should be cleared when something might have changed the filter value:
319 # should be cleared when something might have changed the filter value:
319 # - new changesets,
320 # - new changesets,
320 # - phase change,
321 # - phase change,
321 # - new obsolescence marker,
322 # - new obsolescence marker,
322 # - working directory parent change,
323 # - working directory parent change,
323 # - bookmark changes
324 # - bookmark changes
324 self.filteredrevcache = {}
325 self.filteredrevcache = {}
325
326
326 # generic mapping between names and nodes
327 # generic mapping between names and nodes
327 self.names = namespaces.namespaces()
328 self.names = namespaces.namespaces()
328
329
329 def close(self):
330 def close(self):
330 self._writecaches()
331 self._writecaches()
331
332
332 def _writecaches(self):
333 def _writecaches(self):
333 if self._revbranchcache:
334 if self._revbranchcache:
334 self._revbranchcache.write()
335 self._revbranchcache.write()
335
336
336 def _restrictcapabilities(self, caps):
337 def _restrictcapabilities(self, caps):
337 if self.ui.configbool('experimental', 'bundle2-advertise', True):
338 if self.ui.configbool('experimental', 'bundle2-advertise', True):
338 caps = set(caps)
339 caps = set(caps)
339 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
340 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
340 caps.add('bundle2=' + urllib.quote(capsblob))
341 caps.add('bundle2=' + urllib.quote(capsblob))
341 return caps
342 return caps
342
343
343 def _applyopenerreqs(self):
344 def _applyopenerreqs(self):
344 self.svfs.options = dict((r, 1) for r in self.requirements
345 self.svfs.options = dict((r, 1) for r in self.requirements
345 if r in self.openerreqs)
346 if r in self.openerreqs)
346 # experimental config: format.chunkcachesize
347 # experimental config: format.chunkcachesize
347 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
348 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
348 if chunkcachesize is not None:
349 if chunkcachesize is not None:
349 self.svfs.options['chunkcachesize'] = chunkcachesize
350 self.svfs.options['chunkcachesize'] = chunkcachesize
350 # experimental config: format.maxchainlen
351 # experimental config: format.maxchainlen
351 maxchainlen = self.ui.configint('format', 'maxchainlen')
352 maxchainlen = self.ui.configint('format', 'maxchainlen')
352 if maxchainlen is not None:
353 if maxchainlen is not None:
353 self.svfs.options['maxchainlen'] = maxchainlen
354 self.svfs.options['maxchainlen'] = maxchainlen
354 # experimental config: format.manifestcachesize
355 # experimental config: format.manifestcachesize
355 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
356 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
356 if manifestcachesize is not None:
357 if manifestcachesize is not None:
357 self.svfs.options['manifestcachesize'] = manifestcachesize
358 self.svfs.options['manifestcachesize'] = manifestcachesize
358 # experimental config: format.aggressivemergedeltas
359 # experimental config: format.aggressivemergedeltas
359 aggressivemergedeltas = self.ui.configbool('format',
360 aggressivemergedeltas = self.ui.configbool('format',
360 'aggressivemergedeltas', False)
361 'aggressivemergedeltas', False)
361 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
362 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
362
363
363 def _writerequirements(self):
364 def _writerequirements(self):
364 scmutil.writerequires(self.vfs, self.requirements)
365 scmutil.writerequires(self.vfs, self.requirements)
365
366
366 def _checknested(self, path):
367 def _checknested(self, path):
367 """Determine if path is a legal nested repository."""
368 """Determine if path is a legal nested repository."""
368 if not path.startswith(self.root):
369 if not path.startswith(self.root):
369 return False
370 return False
370 subpath = path[len(self.root) + 1:]
371 subpath = path[len(self.root) + 1:]
371 normsubpath = util.pconvert(subpath)
372 normsubpath = util.pconvert(subpath)
372
373
373 # XXX: Checking against the current working copy is wrong in
374 # XXX: Checking against the current working copy is wrong in
374 # the sense that it can reject things like
375 # the sense that it can reject things like
375 #
376 #
376 # $ hg cat -r 10 sub/x.txt
377 # $ hg cat -r 10 sub/x.txt
377 #
378 #
378 # if sub/ is no longer a subrepository in the working copy
379 # if sub/ is no longer a subrepository in the working copy
379 # parent revision.
380 # parent revision.
380 #
381 #
381 # However, it can of course also allow things that would have
382 # However, it can of course also allow things that would have
382 # been rejected before, such as the above cat command if sub/
383 # been rejected before, such as the above cat command if sub/
383 # is a subrepository now, but was a normal directory before.
384 # is a subrepository now, but was a normal directory before.
384 # The old path auditor would have rejected by mistake since it
385 # The old path auditor would have rejected by mistake since it
385 # panics when it sees sub/.hg/.
386 # panics when it sees sub/.hg/.
386 #
387 #
387 # All in all, checking against the working copy seems sensible
388 # All in all, checking against the working copy seems sensible
388 # since we want to prevent access to nested repositories on
389 # since we want to prevent access to nested repositories on
389 # the filesystem *now*.
390 # the filesystem *now*.
390 ctx = self[None]
391 ctx = self[None]
391 parts = util.splitpath(subpath)
392 parts = util.splitpath(subpath)
392 while parts:
393 while parts:
393 prefix = '/'.join(parts)
394 prefix = '/'.join(parts)
394 if prefix in ctx.substate:
395 if prefix in ctx.substate:
395 if prefix == normsubpath:
396 if prefix == normsubpath:
396 return True
397 return True
397 else:
398 else:
398 sub = ctx.sub(prefix)
399 sub = ctx.sub(prefix)
399 return sub.checknested(subpath[len(prefix) + 1:])
400 return sub.checknested(subpath[len(prefix) + 1:])
400 else:
401 else:
401 parts.pop()
402 parts.pop()
402 return False
403 return False
403
404
404 def peer(self):
405 def peer(self):
405 return localpeer(self) # not cached to avoid reference cycle
406 return localpeer(self) # not cached to avoid reference cycle
406
407
407 def unfiltered(self):
408 def unfiltered(self):
408 """Return unfiltered version of the repository
409 """Return unfiltered version of the repository
409
410
410 Intended to be overwritten by filtered repo."""
411 Intended to be overwritten by filtered repo."""
411 return self
412 return self
412
413
413 def filtered(self, name):
414 def filtered(self, name):
414 """Return a filtered version of a repository"""
415 """Return a filtered version of a repository"""
415 # build a new class with the mixin and the current class
416 # build a new class with the mixin and the current class
416 # (possibly subclass of the repo)
417 # (possibly subclass of the repo)
417 class proxycls(repoview.repoview, self.unfiltered().__class__):
418 class proxycls(repoview.repoview, self.unfiltered().__class__):
418 pass
419 pass
419 return proxycls(self, name)
420 return proxycls(self, name)
420
421
421 @repofilecache('bookmarks')
422 @repofilecache('bookmarks')
422 def _bookmarks(self):
423 def _bookmarks(self):
423 return bookmarks.bmstore(self)
424 return bookmarks.bmstore(self)
424
425
425 @repofilecache('bookmarks.current')
426 @repofilecache('bookmarks.current')
426 def _activebookmark(self):
427 def _activebookmark(self):
427 return bookmarks.readactive(self)
428 return bookmarks.readactive(self)
428
429
429 def bookmarkheads(self, bookmark):
430 def bookmarkheads(self, bookmark):
430 name = bookmark.split('@', 1)[0]
431 name = bookmark.split('@', 1)[0]
431 heads = []
432 heads = []
432 for mark, n in self._bookmarks.iteritems():
433 for mark, n in self._bookmarks.iteritems():
433 if mark.split('@', 1)[0] == name:
434 if mark.split('@', 1)[0] == name:
434 heads.append(n)
435 heads.append(n)
435 return heads
436 return heads
436
437
437 # _phaserevs and _phasesets depend on changelog. what we need is to
438 # _phaserevs and _phasesets depend on changelog. what we need is to
438 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
439 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
439 # can't be easily expressed in filecache mechanism.
440 # can't be easily expressed in filecache mechanism.
440 @storecache('phaseroots', '00changelog.i')
441 @storecache('phaseroots', '00changelog.i')
441 def _phasecache(self):
442 def _phasecache(self):
442 return phases.phasecache(self, self._phasedefaults)
443 return phases.phasecache(self, self._phasedefaults)
443
444
444 @storecache('obsstore')
445 @storecache('obsstore')
445 def obsstore(self):
446 def obsstore(self):
446 # read default format for new obsstore.
447 # read default format for new obsstore.
447 # developer config: format.obsstore-version
448 # developer config: format.obsstore-version
448 defaultformat = self.ui.configint('format', 'obsstore-version', None)
449 defaultformat = self.ui.configint('format', 'obsstore-version', None)
449 # rely on obsstore class default when possible.
450 # rely on obsstore class default when possible.
450 kwargs = {}
451 kwargs = {}
451 if defaultformat is not None:
452 if defaultformat is not None:
452 kwargs['defaultformat'] = defaultformat
453 kwargs['defaultformat'] = defaultformat
453 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
454 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
454 store = obsolete.obsstore(self.svfs, readonly=readonly,
455 store = obsolete.obsstore(self.svfs, readonly=readonly,
455 **kwargs)
456 **kwargs)
456 if store and readonly:
457 if store and readonly:
457 self.ui.warn(
458 self.ui.warn(
458 _('obsolete feature not enabled but %i markers found!\n')
459 _('obsolete feature not enabled but %i markers found!\n')
459 % len(list(store)))
460 % len(list(store)))
460 return store
461 return store
461
462
462 @storecache('00changelog.i')
463 @storecache('00changelog.i')
463 def changelog(self):
464 def changelog(self):
464 c = changelog.changelog(self.svfs)
465 c = changelog.changelog(self.svfs)
465 if 'HG_PENDING' in os.environ:
466 if 'HG_PENDING' in os.environ:
466 p = os.environ['HG_PENDING']
467 p = os.environ['HG_PENDING']
467 if p.startswith(self.root):
468 if p.startswith(self.root):
468 c.readpending('00changelog.i.a')
469 c.readpending('00changelog.i.a')
469 return c
470 return c
470
471
471 @storecache('00manifest.i')
472 @storecache('00manifest.i')
472 def manifest(self):
473 def manifest(self):
473 return manifest.manifest(self.svfs)
474 return manifest.manifest(self.svfs)
474
475
475 def dirlog(self, dir):
476 def dirlog(self, dir):
476 return self.manifest.dirlog(dir)
477 return self.manifest.dirlog(dir)
477
478
478 @repofilecache('dirstate')
479 @repofilecache('dirstate')
479 def dirstate(self):
480 def dirstate(self):
480 return dirstate.dirstate(self.vfs, self.ui, self.root,
481 return dirstate.dirstate(self.vfs, self.ui, self.root,
481 self._dirstatevalidate)
482 self._dirstatevalidate)
482
483
483 def _dirstatevalidate(self, node):
484 def _dirstatevalidate(self, node):
484 try:
485 try:
485 self.changelog.rev(node)
486 self.changelog.rev(node)
486 return node
487 return node
487 except error.LookupError:
488 except error.LookupError:
488 if not self._dirstatevalidatewarned:
489 if not self._dirstatevalidatewarned:
489 self._dirstatevalidatewarned = True
490 self._dirstatevalidatewarned = True
490 self.ui.warn(_("warning: ignoring unknown"
491 self.ui.warn(_("warning: ignoring unknown"
491 " working parent %s!\n") % short(node))
492 " working parent %s!\n") % short(node))
492 return nullid
493 return nullid
493
494
494 def __getitem__(self, changeid):
495 def __getitem__(self, changeid):
495 if changeid is None or changeid == wdirrev:
496 if changeid is None or changeid == wdirrev:
496 return context.workingctx(self)
497 return context.workingctx(self)
497 if isinstance(changeid, slice):
498 if isinstance(changeid, slice):
498 return [context.changectx(self, i)
499 return [context.changectx(self, i)
499 for i in xrange(*changeid.indices(len(self)))
500 for i in xrange(*changeid.indices(len(self)))
500 if i not in self.changelog.filteredrevs]
501 if i not in self.changelog.filteredrevs]
501 return context.changectx(self, changeid)
502 return context.changectx(self, changeid)
502
503
503 def __contains__(self, changeid):
504 def __contains__(self, changeid):
504 try:
505 try:
505 self[changeid]
506 self[changeid]
506 return True
507 return True
507 except error.RepoLookupError:
508 except error.RepoLookupError:
508 return False
509 return False
509
510
510 def __nonzero__(self):
511 def __nonzero__(self):
511 return True
512 return True
512
513
513 def __len__(self):
514 def __len__(self):
514 return len(self.changelog)
515 return len(self.changelog)
515
516
516 def __iter__(self):
517 def __iter__(self):
517 return iter(self.changelog)
518 return iter(self.changelog)
518
519
519 def revs(self, expr, *args):
520 def revs(self, expr, *args):
520 '''Return a list of revisions matching the given revset'''
521 '''Return a list of revisions matching the given revset'''
521 expr = revset.formatspec(expr, *args)
522 expr = revset.formatspec(expr, *args)
522 m = revset.match(None, expr)
523 m = revset.match(None, expr)
523 return m(self)
524 return m(self)
524
525
525 def set(self, expr, *args):
526 def set(self, expr, *args):
526 '''
527 '''
527 Yield a context for each matching revision, after doing arg
528 Yield a context for each matching revision, after doing arg
528 replacement via revset.formatspec
529 replacement via revset.formatspec
529 '''
530 '''
530 for r in self.revs(expr, *args):
531 for r in self.revs(expr, *args):
531 yield self[r]
532 yield self[r]
532
533
533 def url(self):
534 def url(self):
534 return 'file:' + self.root
535 return 'file:' + self.root
535
536
536 def hook(self, name, throw=False, **args):
537 def hook(self, name, throw=False, **args):
537 """Call a hook, passing this repo instance.
538 """Call a hook, passing this repo instance.
538
539
539 This a convenience method to aid invoking hooks. Extensions likely
540 This a convenience method to aid invoking hooks. Extensions likely
540 won't call this unless they have registered a custom hook or are
541 won't call this unless they have registered a custom hook or are
541 replacing code that is expected to call a hook.
542 replacing code that is expected to call a hook.
542 """
543 """
543 return hook.hook(self.ui, self, name, throw, **args)
544 return hook.hook(self.ui, self, name, throw, **args)
544
545
545 @unfilteredmethod
546 @unfilteredmethod
546 def _tag(self, names, node, message, local, user, date, extra=None,
547 def _tag(self, names, node, message, local, user, date, extra=None,
547 editor=False):
548 editor=False):
548 if isinstance(names, str):
549 if isinstance(names, str):
549 names = (names,)
550 names = (names,)
550
551
551 branches = self.branchmap()
552 branches = self.branchmap()
552 for name in names:
553 for name in names:
553 self.hook('pretag', throw=True, node=hex(node), tag=name,
554 self.hook('pretag', throw=True, node=hex(node), tag=name,
554 local=local)
555 local=local)
555 if name in branches:
556 if name in branches:
556 self.ui.warn(_("warning: tag %s conflicts with existing"
557 self.ui.warn(_("warning: tag %s conflicts with existing"
557 " branch name\n") % name)
558 " branch name\n") % name)
558
559
559 def writetags(fp, names, munge, prevtags):
560 def writetags(fp, names, munge, prevtags):
560 fp.seek(0, 2)
561 fp.seek(0, 2)
561 if prevtags and prevtags[-1] != '\n':
562 if prevtags and prevtags[-1] != '\n':
562 fp.write('\n')
563 fp.write('\n')
563 for name in names:
564 for name in names:
564 if munge:
565 if munge:
565 m = munge(name)
566 m = munge(name)
566 else:
567 else:
567 m = name
568 m = name
568
569
569 if (self._tagscache.tagtypes and
570 if (self._tagscache.tagtypes and
570 name in self._tagscache.tagtypes):
571 name in self._tagscache.tagtypes):
571 old = self.tags().get(name, nullid)
572 old = self.tags().get(name, nullid)
572 fp.write('%s %s\n' % (hex(old), m))
573 fp.write('%s %s\n' % (hex(old), m))
573 fp.write('%s %s\n' % (hex(node), m))
574 fp.write('%s %s\n' % (hex(node), m))
574 fp.close()
575 fp.close()
575
576
576 prevtags = ''
577 prevtags = ''
577 if local:
578 if local:
578 try:
579 try:
579 fp = self.vfs('localtags', 'r+')
580 fp = self.vfs('localtags', 'r+')
580 except IOError:
581 except IOError:
581 fp = self.vfs('localtags', 'a')
582 fp = self.vfs('localtags', 'a')
582 else:
583 else:
583 prevtags = fp.read()
584 prevtags = fp.read()
584
585
585 # local tags are stored in the current charset
586 # local tags are stored in the current charset
586 writetags(fp, names, None, prevtags)
587 writetags(fp, names, None, prevtags)
587 for name in names:
588 for name in names:
588 self.hook('tag', node=hex(node), tag=name, local=local)
589 self.hook('tag', node=hex(node), tag=name, local=local)
589 return
590 return
590
591
591 try:
592 try:
592 fp = self.wfile('.hgtags', 'rb+')
593 fp = self.wfile('.hgtags', 'rb+')
593 except IOError as e:
594 except IOError as e:
594 if e.errno != errno.ENOENT:
595 if e.errno != errno.ENOENT:
595 raise
596 raise
596 fp = self.wfile('.hgtags', 'ab')
597 fp = self.wfile('.hgtags', 'ab')
597 else:
598 else:
598 prevtags = fp.read()
599 prevtags = fp.read()
599
600
600 # committed tags are stored in UTF-8
601 # committed tags are stored in UTF-8
601 writetags(fp, names, encoding.fromlocal, prevtags)
602 writetags(fp, names, encoding.fromlocal, prevtags)
602
603
603 fp.close()
604 fp.close()
604
605
605 self.invalidatecaches()
606 self.invalidatecaches()
606
607
607 if '.hgtags' not in self.dirstate:
608 if '.hgtags' not in self.dirstate:
608 self[None].add(['.hgtags'])
609 self[None].add(['.hgtags'])
609
610
610 m = matchmod.exact(self.root, '', ['.hgtags'])
611 m = matchmod.exact(self.root, '', ['.hgtags'])
611 tagnode = self.commit(message, user, date, extra=extra, match=m,
612 tagnode = self.commit(message, user, date, extra=extra, match=m,
612 editor=editor)
613 editor=editor)
613
614
614 for name in names:
615 for name in names:
615 self.hook('tag', node=hex(node), tag=name, local=local)
616 self.hook('tag', node=hex(node), tag=name, local=local)
616
617
617 return tagnode
618 return tagnode
618
619
619 def tag(self, names, node, message, local, user, date, editor=False):
620 def tag(self, names, node, message, local, user, date, editor=False):
620 '''tag a revision with one or more symbolic names.
621 '''tag a revision with one or more symbolic names.
621
622
622 names is a list of strings or, when adding a single tag, names may be a
623 names is a list of strings or, when adding a single tag, names may be a
623 string.
624 string.
624
625
625 if local is True, the tags are stored in a per-repository file.
626 if local is True, the tags are stored in a per-repository file.
626 otherwise, they are stored in the .hgtags file, and a new
627 otherwise, they are stored in the .hgtags file, and a new
627 changeset is committed with the change.
628 changeset is committed with the change.
628
629
629 keyword arguments:
630 keyword arguments:
630
631
631 local: whether to store tags in non-version-controlled file
632 local: whether to store tags in non-version-controlled file
632 (default False)
633 (default False)
633
634
634 message: commit message to use if committing
635 message: commit message to use if committing
635
636
636 user: name of user to use if committing
637 user: name of user to use if committing
637
638
638 date: date tuple to use if committing'''
639 date: date tuple to use if committing'''
639
640
640 if not local:
641 if not local:
641 m = matchmod.exact(self.root, '', ['.hgtags'])
642 m = matchmod.exact(self.root, '', ['.hgtags'])
642 if any(self.status(match=m, unknown=True, ignored=True)):
643 if any(self.status(match=m, unknown=True, ignored=True)):
643 raise util.Abort(_('working copy of .hgtags is changed'),
644 raise util.Abort(_('working copy of .hgtags is changed'),
644 hint=_('please commit .hgtags manually'))
645 hint=_('please commit .hgtags manually'))
645
646
646 self.tags() # instantiate the cache
647 self.tags() # instantiate the cache
647 self._tag(names, node, message, local, user, date, editor=editor)
648 self._tag(names, node, message, local, user, date, editor=editor)
648
649
649 @filteredpropertycache
650 @filteredpropertycache
650 def _tagscache(self):
651 def _tagscache(self):
651 '''Returns a tagscache object that contains various tags related
652 '''Returns a tagscache object that contains various tags related
652 caches.'''
653 caches.'''
653
654
654 # This simplifies its cache management by having one decorated
655 # This simplifies its cache management by having one decorated
655 # function (this one) and the rest simply fetch things from it.
656 # function (this one) and the rest simply fetch things from it.
656 class tagscache(object):
657 class tagscache(object):
657 def __init__(self):
658 def __init__(self):
658 # These two define the set of tags for this repository. tags
659 # These two define the set of tags for this repository. tags
659 # maps tag name to node; tagtypes maps tag name to 'global' or
660 # maps tag name to node; tagtypes maps tag name to 'global' or
660 # 'local'. (Global tags are defined by .hgtags across all
661 # 'local'. (Global tags are defined by .hgtags across all
661 # heads, and local tags are defined in .hg/localtags.)
662 # heads, and local tags are defined in .hg/localtags.)
662 # They constitute the in-memory cache of tags.
663 # They constitute the in-memory cache of tags.
663 self.tags = self.tagtypes = None
664 self.tags = self.tagtypes = None
664
665
665 self.nodetagscache = self.tagslist = None
666 self.nodetagscache = self.tagslist = None
666
667
667 cache = tagscache()
668 cache = tagscache()
668 cache.tags, cache.tagtypes = self._findtags()
669 cache.tags, cache.tagtypes = self._findtags()
669
670
670 return cache
671 return cache
671
672
672 def tags(self):
673 def tags(self):
673 '''return a mapping of tag to node'''
674 '''return a mapping of tag to node'''
674 t = {}
675 t = {}
675 if self.changelog.filteredrevs:
676 if self.changelog.filteredrevs:
676 tags, tt = self._findtags()
677 tags, tt = self._findtags()
677 else:
678 else:
678 tags = self._tagscache.tags
679 tags = self._tagscache.tags
679 for k, v in tags.iteritems():
680 for k, v in tags.iteritems():
680 try:
681 try:
681 # ignore tags to unknown nodes
682 # ignore tags to unknown nodes
682 self.changelog.rev(v)
683 self.changelog.rev(v)
683 t[k] = v
684 t[k] = v
684 except (error.LookupError, ValueError):
685 except (error.LookupError, ValueError):
685 pass
686 pass
686 return t
687 return t
687
688
688 def _findtags(self):
689 def _findtags(self):
689 '''Do the hard work of finding tags. Return a pair of dicts
690 '''Do the hard work of finding tags. Return a pair of dicts
690 (tags, tagtypes) where tags maps tag name to node, and tagtypes
691 (tags, tagtypes) where tags maps tag name to node, and tagtypes
691 maps tag name to a string like \'global\' or \'local\'.
692 maps tag name to a string like \'global\' or \'local\'.
692 Subclasses or extensions are free to add their own tags, but
693 Subclasses or extensions are free to add their own tags, but
693 should be aware that the returned dicts will be retained for the
694 should be aware that the returned dicts will be retained for the
694 duration of the localrepo object.'''
695 duration of the localrepo object.'''
695
696
696 # XXX what tagtype should subclasses/extensions use? Currently
697 # XXX what tagtype should subclasses/extensions use? Currently
697 # mq and bookmarks add tags, but do not set the tagtype at all.
698 # mq and bookmarks add tags, but do not set the tagtype at all.
698 # Should each extension invent its own tag type? Should there
699 # Should each extension invent its own tag type? Should there
699 # be one tagtype for all such "virtual" tags? Or is the status
700 # be one tagtype for all such "virtual" tags? Or is the status
700 # quo fine?
701 # quo fine?
701
702
702 alltags = {} # map tag name to (node, hist)
703 alltags = {} # map tag name to (node, hist)
703 tagtypes = {}
704 tagtypes = {}
704
705
705 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
706 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
706 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
707 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
707
708
708 # Build the return dicts. Have to re-encode tag names because
709 # Build the return dicts. Have to re-encode tag names because
709 # the tags module always uses UTF-8 (in order not to lose info
710 # the tags module always uses UTF-8 (in order not to lose info
710 # writing to the cache), but the rest of Mercurial wants them in
711 # writing to the cache), but the rest of Mercurial wants them in
711 # local encoding.
712 # local encoding.
712 tags = {}
713 tags = {}
713 for (name, (node, hist)) in alltags.iteritems():
714 for (name, (node, hist)) in alltags.iteritems():
714 if node != nullid:
715 if node != nullid:
715 tags[encoding.tolocal(name)] = node
716 tags[encoding.tolocal(name)] = node
716 tags['tip'] = self.changelog.tip()
717 tags['tip'] = self.changelog.tip()
717 tagtypes = dict([(encoding.tolocal(name), value)
718 tagtypes = dict([(encoding.tolocal(name), value)
718 for (name, value) in tagtypes.iteritems()])
719 for (name, value) in tagtypes.iteritems()])
719 return (tags, tagtypes)
720 return (tags, tagtypes)
720
721
721 def tagtype(self, tagname):
722 def tagtype(self, tagname):
722 '''
723 '''
723 return the type of the given tag. result can be:
724 return the type of the given tag. result can be:
724
725
725 'local' : a local tag
726 'local' : a local tag
726 'global' : a global tag
727 'global' : a global tag
727 None : tag does not exist
728 None : tag does not exist
728 '''
729 '''
729
730
730 return self._tagscache.tagtypes.get(tagname)
731 return self._tagscache.tagtypes.get(tagname)
731
732
732 def tagslist(self):
733 def tagslist(self):
733 '''return a list of tags ordered by revision'''
734 '''return a list of tags ordered by revision'''
734 if not self._tagscache.tagslist:
735 if not self._tagscache.tagslist:
735 l = []
736 l = []
736 for t, n in self.tags().iteritems():
737 for t, n in self.tags().iteritems():
737 l.append((self.changelog.rev(n), t, n))
738 l.append((self.changelog.rev(n), t, n))
738 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
739 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
739
740
740 return self._tagscache.tagslist
741 return self._tagscache.tagslist
741
742
742 def nodetags(self, node):
743 def nodetags(self, node):
743 '''return the tags associated with a node'''
744 '''return the tags associated with a node'''
744 if not self._tagscache.nodetagscache:
745 if not self._tagscache.nodetagscache:
745 nodetagscache = {}
746 nodetagscache = {}
746 for t, n in self._tagscache.tags.iteritems():
747 for t, n in self._tagscache.tags.iteritems():
747 nodetagscache.setdefault(n, []).append(t)
748 nodetagscache.setdefault(n, []).append(t)
748 for tags in nodetagscache.itervalues():
749 for tags in nodetagscache.itervalues():
749 tags.sort()
750 tags.sort()
750 self._tagscache.nodetagscache = nodetagscache
751 self._tagscache.nodetagscache = nodetagscache
751 return self._tagscache.nodetagscache.get(node, [])
752 return self._tagscache.nodetagscache.get(node, [])
752
753
753 def nodebookmarks(self, node):
754 def nodebookmarks(self, node):
754 marks = []
755 marks = []
755 for bookmark, n in self._bookmarks.iteritems():
756 for bookmark, n in self._bookmarks.iteritems():
756 if n == node:
757 if n == node:
757 marks.append(bookmark)
758 marks.append(bookmark)
758 return sorted(marks)
759 return sorted(marks)
759
760
760 def branchmap(self):
761 def branchmap(self):
761 '''returns a dictionary {branch: [branchheads]} with branchheads
762 '''returns a dictionary {branch: [branchheads]} with branchheads
762 ordered by increasing revision number'''
763 ordered by increasing revision number'''
763 branchmap.updatecache(self)
764 branchmap.updatecache(self)
764 return self._branchcaches[self.filtername]
765 return self._branchcaches[self.filtername]
765
766
766 @unfilteredmethod
767 @unfilteredmethod
767 def revbranchcache(self):
768 def revbranchcache(self):
768 if not self._revbranchcache:
769 if not self._revbranchcache:
769 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
770 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
770 return self._revbranchcache
771 return self._revbranchcache
771
772
772 def branchtip(self, branch, ignoremissing=False):
773 def branchtip(self, branch, ignoremissing=False):
773 '''return the tip node for a given branch
774 '''return the tip node for a given branch
774
775
775 If ignoremissing is True, then this method will not raise an error.
776 If ignoremissing is True, then this method will not raise an error.
776 This is helpful for callers that only expect None for a missing branch
777 This is helpful for callers that only expect None for a missing branch
777 (e.g. namespace).
778 (e.g. namespace).
778
779
779 '''
780 '''
780 try:
781 try:
781 return self.branchmap().branchtip(branch)
782 return self.branchmap().branchtip(branch)
782 except KeyError:
783 except KeyError:
783 if not ignoremissing:
784 if not ignoremissing:
784 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
785 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
785 else:
786 else:
786 pass
787 pass
787
788
788 def lookup(self, key):
789 def lookup(self, key):
789 return self[key].node()
790 return self[key].node()
790
791
791 def lookupbranch(self, key, remote=None):
792 def lookupbranch(self, key, remote=None):
792 repo = remote or self
793 repo = remote or self
793 if key in repo.branchmap():
794 if key in repo.branchmap():
794 return key
795 return key
795
796
796 repo = (remote and remote.local()) and remote or self
797 repo = (remote and remote.local()) and remote or self
797 return repo[key].branch()
798 return repo[key].branch()
798
799
799 def known(self, nodes):
800 def known(self, nodes):
800 nm = self.changelog.nodemap
801 nm = self.changelog.nodemap
801 pc = self._phasecache
802 pc = self._phasecache
802 result = []
803 result = []
803 for n in nodes:
804 for n in nodes:
804 r = nm.get(n)
805 r = nm.get(n)
805 resp = not (r is None or pc.phase(self, r) >= phases.secret)
806 resp = not (r is None or pc.phase(self, r) >= phases.secret)
806 result.append(resp)
807 result.append(resp)
807 return result
808 return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
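The filter machinery above is extensible through adddatafilter(). A hypothetical extension sketch; the filter name 'upper:' and its behaviour are invented for illustration, only adddatafilter() itself comes from this file:

    def upperfilter(s, cmd, ui=None, repo=None, filename=None, **kwargs):
        # receives file data plus keyword context, returns the filtered data
        return s.upper()

    def reposetup(ui, repo):
        # after this, an '[encode] **.txt = upper:' hgrc entry is routed
        # through upperfilter by _loadfilter()'s prefix match on the name
        repo.adddatafilter('upper:', upperfilter)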

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write()

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
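A minimal caller sketch for transaction() above, assuming the store lock is already held (as the develwarn check expects) and an illustrative transaction name:

    lock = repo.lock()
    try:
        tr = repo.transaction('illustrative-operation')
        try:
            # append store data through tr here
            tr.close()    # runs pretxnclose, the finalizers, then txnclose
        finally:
            tr.release()  # if close() was never reached, this aborts the
                          # transaction and fires the txnabort hook
    finally:
        lock.release()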

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              parentenvvar=None):
        parentlock = None
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
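A sketch of the lock ordering the two docstrings above prescribe, assuming a repository opened through the mercurial.hg API:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')
    wlock = repo.wlock()        # always take the working-copy lock first
    try:
        lock = repo.lock()      # then the store lock
        try:
            pass                # modify the store and working copy here
        finally:
            lock.release()
    finally:
        wlock.release()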

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
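A hypothetical caller sketch for commit() above; repo is assumed to exist, and the message, user, and date values are illustrative:

    node = repo.commit(text='illustrative commit message',
                       user='Jane Doe <jane@example.com>',
                       date='2015-09-30 12:00 +0000')
    if node is None:
        repo.ui.status('nothing changed\n')
    else:
        repo.ui.status('committed %s\n' % hex(node))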
1562
1563
1563 @unfilteredmethod
1564 @unfilteredmethod
1564 def commitctx(self, ctx, error=False):
1565 def commitctx(self, ctx, error=False):
1565 """Add a new revision to current repository.
1566 """Add a new revision to current repository.
1566 Revision information is passed via the context argument.
1567 Revision information is passed via the context argument.
1567 """
1568 """
1568
1569
1569 tr = None
1570 tr = None
1570 p1, p2 = ctx.p1(), ctx.p2()
1571 p1, p2 = ctx.p1(), ctx.p2()
1571 user = ctx.user()
1572 user = ctx.user()
1572
1573
1573 lock = self.lock()
1574 lock = self.lock()
1574 try:
1575 try:
1575 tr = self.transaction("commit")
1576 tr = self.transaction("commit")
1576 trp = weakref.proxy(tr)
1577 trp = weakref.proxy(tr)
1577
1578
1578 if ctx.files():
1579 if ctx.files():
1579 m1 = p1.manifest()
1580 m1 = p1.manifest()
1580 m2 = p2.manifest()
1581 m2 = p2.manifest()
1581 m = m1.copy()
1582 m = m1.copy()
1582
1583
1583 # check in files
1584 # check in files
1584 added = []
1585 added = []
1585 changed = []
1586 changed = []
1586 removed = list(ctx.removed())
1587 removed = list(ctx.removed())
1587 linkrev = len(self)
1588 linkrev = len(self)
1588 self.ui.note(_("committing files:\n"))
1589 self.ui.note(_("committing files:\n"))
1589 for f in sorted(ctx.modified() + ctx.added()):
1590 for f in sorted(ctx.modified() + ctx.added()):
1590 self.ui.note(f + "\n")
1591 self.ui.note(f + "\n")
1591 try:
1592 try:
1592 fctx = ctx[f]
1593 fctx = ctx[f]
1593 if fctx is None:
1594 if fctx is None:
1594 removed.append(f)
1595 removed.append(f)
1595 else:
1596 else:
1596 added.append(f)
1597 added.append(f)
1597 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1598 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1598 trp, changed)
1599 trp, changed)
1599 m.setflag(f, fctx.flags())
1600 m.setflag(f, fctx.flags())
1600 except OSError as inst:
1601 except OSError as inst:
1601 self.ui.warn(_("trouble committing %s!\n") % f)
1602 self.ui.warn(_("trouble committing %s!\n") % f)
1602 raise
1603 raise
1603 except IOError as inst:
1604 except IOError as inst:
1604 errcode = getattr(inst, 'errno', errno.ENOENT)
1605 errcode = getattr(inst, 'errno', errno.ENOENT)
1605 if error or errcode and errcode != errno.ENOENT:
1606 if error or errcode and errcode != errno.ENOENT:
1606 self.ui.warn(_("trouble committing %s!\n") % f)
1607 self.ui.warn(_("trouble committing %s!\n") % f)
1607 raise
1608 raise
1608
1609
1609 # update manifest
1610 # update manifest
1610 self.ui.note(_("committing manifest\n"))
1611 self.ui.note(_("committing manifest\n"))
1611 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1612 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1612 drop = [f for f in removed if f in m]
1613 drop = [f for f in removed if f in m]
1613 for f in drop:
1614 for f in drop:
1614 del m[f]
1615 del m[f]
1615 mn = self.manifest.add(m, trp, linkrev,
1616 mn = self.manifest.add(m, trp, linkrev,
1616 p1.manifestnode(), p2.manifestnode(),
1617 p1.manifestnode(), p2.manifestnode(),
1617 added, drop)
1618 added, drop)
1618 files = changed + removed
1619 files = changed + removed
1619 else:
1620 else:
1620 mn = p1.manifestnode()
1621 mn = p1.manifestnode()
1621 files = []
1622 files = []
1622
1623
1623 # update changelog
1624 # update changelog
1624 self.ui.note(_("committing changelog\n"))
1625 self.ui.note(_("committing changelog\n"))
1625 self.changelog.delayupdate(tr)
1626 self.changelog.delayupdate(tr)
1626 n = self.changelog.add(mn, files, ctx.description(),
1627 n = self.changelog.add(mn, files, ctx.description(),
1627 trp, p1.node(), p2.node(),
1628 trp, p1.node(), p2.node(),
1628 user, ctx.date(), ctx.extra().copy())
1629 user, ctx.date(), ctx.extra().copy())
1629 p = lambda: tr.writepending() and self.root or ""
1630 p = lambda: tr.writepending() and self.root or ""
1630 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1631 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1631 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1632 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1632 parent2=xp2, pending=p)
1633 parent2=xp2, pending=p)
1633 # set the new commit is proper phase
1634 # set the new commit is proper phase
1634 targetphase = subrepo.newcommitphase(self.ui, ctx)
1635 targetphase = subrepo.newcommitphase(self.ui, ctx)
1635 if targetphase:
1636 if targetphase:
1636 # retract boundary do not alter parent changeset.
1637 # retract boundary do not alter parent changeset.
1637 # if a parent have higher the resulting phase will
1638 # if a parent have higher the resulting phase will
1638 # be compliant anyway
1639 # be compliant anyway
1639 #
1640 #
1640 # if minimal phase was 0 we don't need to retract anything
1641 # if minimal phase was 0 we don't need to retract anything
1641 phases.retractboundary(self, tr, targetphase, [n])
1642 phases.retractboundary(self, tr, targetphase, [n])
1642 tr.close()
1643 tr.close()
1643 branchmap.updatecache(self.filtered('served'))
1644 branchmap.updatecache(self.filtered('served'))
1644 return n
1645 return n
1645 finally:
1646 finally:
1646 if tr:
1647 if tr:
1647 tr.release()
1648 tr.release()
1648 lock.release()
1649 lock.release()
1649
1650
1650 @unfilteredmethod
1651 @unfilteredmethod
1651 def destroying(self):
1652 def destroying(self):
1652 '''Inform the repository that nodes are about to be destroyed.
1653 '''Inform the repository that nodes are about to be destroyed.
1653 Intended for use by strip and rollback, so there's a common
1654 Intended for use by strip and rollback, so there's a common
1654 place for anything that has to be done before destroying history.
1655 place for anything that has to be done before destroying history.
1655
1656
1656 This is mostly useful for saving state that is in memory and waiting
1657 This is mostly useful for saving state that is in memory and waiting
1657 to be flushed when the current lock is released. Because a call to
1658 to be flushed when the current lock is released. Because a call to
1658 destroyed is imminent, the repo will be invalidated causing those
1659 destroyed is imminent, the repo will be invalidated causing those
1659 changes to stay in memory (waiting for the next unlock), or vanish
1660 changes to stay in memory (waiting for the next unlock), or vanish
1660 completely.
1661 completely.
1661 '''
1662 '''
1662 # When using the same lock to commit and strip, the phasecache is left
1663 # When using the same lock to commit and strip, the phasecache is left
1663 # dirty after committing. Then when we strip, the repo is invalidated,
1664 # dirty after committing. Then when we strip, the repo is invalidated,
1664 # causing those changes to disappear.
1665 # causing those changes to disappear.
1665 if '_phasecache' in vars(self):
1666 if '_phasecache' in vars(self):
1666 self._phasecache.write()
1667 self._phasecache.write()
1667
1668
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

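The destroying/destroyed pair is a before/after protocol around history
destruction. A minimal sketch of the expected calling sequence, loosely
modeled on what strip does (the real logic lives in mercurial/repair.py
and is more involved):

    lock = repo.lock()
    try:
        repo.destroying()   # flush pending in-memory state (phasecache)
        # ... back up and truncate the stripped revisions here ...
        repo.destroyed()    # repair phase, branch and tag caches
    finally:
        lock.release()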
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

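For illustration, a sketch comparing the working directory against its
first parent (the attribute names assume the status class returned by
context status calls in this era):

    st = repo.status()          # node1='.', node2=None (working dir)
    for f in st.modified:
        repo.ui.write('M %s\n' % f)
    for f in st.added:
        repo.ui.write('A %s\n' % f)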
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

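Hypothetical usage, printing the open and closed heads of the "default"
branch newest-first (short() comes from the node module imported above):

    for h in repo.branchheads('default', closed=True):
        repo.ui.write('%s\n' % short(h))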
    def branches(self, nodes):
        # Legacy discovery helper: for each starting node, follow first
        # parents down the linear run of history and report the tuple
        # (start, end-of-run, parent1-of-end, parent2-of-end), stopping
        # at the first merge or root changeset.
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # Legacy discovery helper: for each (top, bottom) pair, walk
        # first parents from top towards bottom and sample the nodes at
        # exponentially growing distances (1, 2, 4, 8, ...) from top.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

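A worked illustration: on a purely linear repository with revisions
0..99 (assumed here purely for exposition), sampling between the tip
and the root returns ancestors at doubling distances from the top:

    tip = repo.changelog.node(99)
    root = repo.changelog.node(0)
    sample = repo.between([(tip, root)])[0]
    # revisions of the sampled nodes:
    #   98, 97, 95, 91, 83, 67, 35
    # i.e. distances 1, 2, 4, 8, 16, 32, 64 from the tip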
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

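A hypothetical extension sketch showing the intended override pattern
(the class-wrapping idiom is standard for extensions; the check itself
is invented for illustration):

    def reposetup(ui, repo):
        class vetoingrepo(repo.__class__):
            def checkpush(self, pushop):
                super(vetoingrepo, self).checkpush(pushop)
                if self[None].dirty():
                    raise util.Abort('push with uncommitted changes')
        repo.__class__ = vetoingrepo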
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

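Registration is expected to happen from an extension; a minimal sketch,
assuming the util.hooks.add(source, hook) interface (the size limit is
invented for illustration):

    def _limitpush(repo, remote, outgoing):
        if len(outgoing.missing) > 1000:
            raise util.Abort('refusing to push %d changesets at once'
                             % len(outgoing.missing))
    repo.prepushoutgoinghooks.add('myext', _limitpush)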
    def stream_in(self, remote, remotereqs):
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))

-        self.applystreamclone(remotereqs, rbranchmap, fp)
+        streamclone.applyremotedata(self, remotereqs, rbranchmap, fp)
        return len(self.heads()) + 1

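For orientation, a sketch of the stream body handed to
streamclone.applyremotedata once the status line has been consumed
(format as understood from exchange.consumestreamclone in this era;
treat the details as an assumption):

    # <filecount> <bytecount>\n
    # then, repeated <filecount> times:
    #     <store path>\0<size>\n
    #     <size> raw bytes of revlog data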
-    def applystreamclone(self, remotereqs, remotebranchmap, fp):
-        """Apply stream clone data to this repository.
-
-        "remotereqs" is a set of requirements to handle the incoming data.
-        "remotebranchmap" is the result of a branchmap lookup on the remote. It
-        can be None.
-        "fp" is a file object containing the raw stream data, suitable for
-        feeding into exchange.consumestreamclone.
-        """
-        lock = self.lock()
-        try:
-            exchange.consumestreamclone(self, fp)
-
-            # new requirements = old non-format requirements +
-            #                    new format-related requirements from the
-            #                    streamed-in repository
-            self.requirements = remotereqs | (
-                self.requirements - self.supportedformats)
-            self._applyopenerreqs()
-            self._writerequirements()
-
-            if remotebranchmap:
-                rbheads = []
-                closed = []
-                for bheads in remotebranchmap.itervalues():
-                    rbheads.extend(bheads)
-                    for h in bheads:
-                        r = self.changelog.rev(h)
-                        b, c = self.changelog.branchinfo(r)
-                        if c:
-                            closed.append(h)
-
-                if rbheads:
-                    rtiprev = max((int(self.changelog.rev(node))
-                                   for node in rbheads))
-                    cache = branchmap.branchcache(remotebranchmap,
-                                                  self[rtiprev].node(),
-                                                  rtiprev,
-                                                  closednodes=closed)
-                    # Try to stick it as low as possible
-                    # filters above 'served' are unlikely to be fetched
-                    # from a clone
-                    for candidate in ('base', 'immutable', 'served'):
-                        rview = self.filtered(candidate)
-                        if cache.validfor(rview):
-                            self._branchcaches[candidate] = cache
-                            cache.write(rview)
-                            break
-            self.invalidate()
-        finally:
-            lock.release()

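A hypothetical worked example of the requirements merge performed above
(set contents are illustrative; supportedformats covers the format-level
requirements such as revlogv1 and generaldelta):

    # before the clone:
    #   self.requirements     = {'revlogv1', 'store', 'fncache', 'dotencode'}
    #   self.supportedformats = {'revlogv1', 'generaldelta'}
    #   remotereqs            = {'revlogv1', 'generaldelta'}
    # remotereqs | (self.requirements - self.supportedformats)
    #   = {'revlogv1', 'generaldelta', 'store', 'fncache', 'dotencode'}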
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        # internal config: ui.quietbookmarkmove
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

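The negotiation above keys off three server capabilities; a summary of
how each is interpreted (capability values are illustrative):

    # 'stream-preferred'
    #     server hint: default to streaming when the caller gave no
    #     explicit preference (stream is None)
    # 'stream'
    #     server can stream, and its on-disk format is plain revlogv1
    # 'streamreqs=revlogv1,generaldelta'
    #     server streams repositories needing these formats; the client
    #     streams in only if it supports every listed format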
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

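Hypothetical usage, driving the bookmarks namespace through the generic
pushkey interface (values are hex nodes; an empty old value is assumed
to mean "the key does not exist yet"):

    newnode = hex(repo['tip'].node())
    ok = repo.pushkey('bookmarks', 'stable', '', newnode)
    if not ok:
        repo.ui.warn('bookmark was moved concurrently\n')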
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

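Hypothetical counterpart to the pushkey example above: enumerating the
same namespace (the returned mapping is assumed to be name -> hex node
for bookmarks):

    for name, hexnode in sorted(repo.listkeys('bookmarks').iteritems()):
        repo.ui.write('%s = %s\n' % (name, hexnode))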
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

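Illustration of the journal -> undo mapping performed by undoname:

    undoname('.hg/journal')           # -> '.hg/undo'
    undoname('.hg/journal.bookmarks') # -> '.hg/undo.bookmarks'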
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True