merge with stable
Matt Mackall - r26409:19d946cf merge default
@@ -1,1966 +1,1966 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, wdirrev, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect, random
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

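# Illustrative sketch (editor's addition, not part of the original module):
# how the cache decorators above are consumed further down in this file.
#
#   @repofilecache('bookmarks')       # reread when '.hg/bookmarks' changes
#   def _bookmarks(self): ...
#
#   @storecache('00changelog.i')      # same, but the path is resolved
#   def changelog(self): ...          # inside '.hg/store' via sjoin()
#
#   hasunfilteredcache(repo, '_encodefilterpats')   # True once that
#                                     # unfilteredpropertycache is filled
#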
def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

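# Illustrative sketch (editor's addition, not part of the original module):
# how unfilteredmethod is applied. Real uses appear later in this file,
# e.g. on _tag() and revbranchcache():
#
#   class localrepository(object):
#       @unfilteredmethod
#       def destroying(self):
#           # `self` is the unfiltered repo here, so hidden revisions
#           # are visible to this method (the method name is hypothetical).
#           pass
#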
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

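# Illustrative sketch (editor's addition): obtaining and using a local peer.
# `repo` stands for a localrepository instance (see peer() below); note the
# peer wraps the 'served' view of the repo, not the unfiltered one.
#
#   p = repo.peer()
#   node = p.lookup('tip')
#   p.known([node])          # -> [True] unless 'tip' is filtered out
#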
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                # experimental config: format.generaldelta
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

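    # Illustrative sketch (editor's addition): using the filtered views
    # built by filtered(). Filter names such as 'served' (used by localpeer
    # above) and 'visible' (used by cancopy() below) come from repoview.
    #
    #   visible = repo.filtered('visible')
    #   assert visible.unfiltered() is repo.unfiltered()
    #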
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        return bookmarks.readactive(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

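    # Illustrative sketch (editor's addition): the container protocol
    # defined above, as used throughout Mercurial. `node` is hypothetical.
    #
    #   ctx = repo['tip']       # changectx from a rev, node, or tag
    #   wctx = repo[None]       # working directory context
    #   if node in repo:        # __contains__ swallows RepoLookupError
    #       pass
    #   for rev in repo:        # iterates changelog revision numbers
    #       pass
    #   nrevs = len(repo)       # size of the changelog
    #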
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

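    # Illustrative sketch (editor's addition): revs() and set() with
    # formatspec-style argument substitution (%s for a string, %d for an
    # int; see revset.formatspec for the full table).
    #
    #   for rev in repo.revs('ancestors(%s) and draft()', 'tip'):
    #       pass
    #   for ctx in repo.set('heads(branch(%s))', 'default'):
    #       pass
    #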
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in a non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

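    # Illustrative sketch (editor's addition): consuming the tag APIs; the
    # tag name 'v1.0' is hypothetical. tagtype() is defined just below.
    #
    #   node = repo.tags().get('v1.0')    # tag name -> node, or None
    #   kind = repo.tagtype('v1.0')       # 'global', 'local', or None
    #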
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

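    # Illustrative sketch (editor's addition): the branchmap shape.
    #
    #   bm = repo.branchmap()
    #   heads = bm['default']           # branch heads, by increasing rev
    #   tip = bm.branchtip('default')   # see branchtip() below
    #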
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

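    # Illustrative sketch (editor's addition): publishing() reads the
    # standard phases.publish setting, e.g. in a repository's hgrc:
    #
    #   [phases]
    #   publish = False    # serve changesets as draft (non-publishing)
    #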
817 def cancopy(self):
817 def cancopy(self):
818 # so statichttprepo's override of local() works
818 # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
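        # Resolve the configured [encode]/[decode] filters for this repo.
        # Each config entry maps a file pattern to a filter command; a '!'
        # command disables the pattern. Commands whose prefix matches a
        # filter registered via adddatafilter() run in-process; anything
        # else falls back to util.filter(), i.e. an external shell command.
        # Results are cached in self.filterpats, keyed by section name.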
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

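    # Illustrative sketch (not part of the original module): the encode and
    # decode tables consumed by _loadfilter() above come from hgrc sections.
    # The gzip rules below are adapted from the stock "hg help config"
    # filter example, shown here as comments only:
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip
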
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write()

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means the file is
        # outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens
        # during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

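    # Illustrative sketch (not in the original source): callers drive the
    # transaction returned above with a close()/release() pair; release()
    # without a prior close() aborts and replays the journal. commitctx()
    # later in this class follows exactly this pattern:
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ... # write to the store through tr
    #       tr.close()        # commit the transaction
    #   finally:
    #       tr.release()      # abort if close() was never reached
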
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

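    # Illustrative sketch (not in the original source): the lock-ordering
    # rule stated in the docstrings above looks, in practice, like the
    # rollback() method earlier in this class:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()   # working-directory lock first...
    #       lock = repo.lock()     # ...then the store lock
    #       ... # modify the repository
    #   finally:
    #       release(lock, wlock)   # release in reverse order
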
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
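            # (descriptive note, not in the original source: delayupdate()
            # below buffers the changelog write through the transaction's
            # pending mechanism, so the pretxncommit hook can see the new
            # revision via the 'pending' callback before it is finalized)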
1619 self.ui.note(_("committing changelog\n"))
1619 self.ui.note(_("committing changelog\n"))
1620 self.changelog.delayupdate(tr)
1620 self.changelog.delayupdate(tr)
1621 n = self.changelog.add(mn, files, ctx.description(),
1621 n = self.changelog.add(mn, files, ctx.description(),
1622 trp, p1.node(), p2.node(),
1622 trp, p1.node(), p2.node(),
1623 user, ctx.date(), ctx.extra().copy())
1623 user, ctx.date(), ctx.extra().copy())
1624 p = lambda: tr.writepending() and self.root or ""
1624 p = lambda: tr.writepending() and self.root or ""
1625 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1625 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1626 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1626 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1627 parent2=xp2, pending=p)
1627 parent2=xp2, pending=p)
1628 # set the new commit is proper phase
1628 # set the new commit is proper phase
1629 targetphase = subrepo.newcommitphase(self.ui, ctx)
1629 targetphase = subrepo.newcommitphase(self.ui, ctx)
1630 if targetphase:
1630 if targetphase:
1631 # retract boundary do not alter parent changeset.
1631 # retract boundary do not alter parent changeset.
1632 # if a parent have higher the resulting phase will
1632 # if a parent have higher the resulting phase will
1633 # be compliant anyway
1633 # be compliant anyway
1634 #
1634 #
1635 # if minimal phase was 0 we don't need to retract anything
1635 # if minimal phase was 0 we don't need to retract anything
1636 phases.retractboundary(self, tr, targetphase, [n])
1636 phases.retractboundary(self, tr, targetphase, [n])
1637 tr.close()
1637 tr.close()
1638 branchmap.updatecache(self.filtered('served'))
1638 branchmap.updatecache(self.filtered('served'))
1639 return n
1639 return n
1640 finally:
1640 finally:
1641 if tr:
1641 if tr:
1642 tr.release()
1642 tr.release()
1643 lock.release()
1643 lock.release()
1644
1644
1645 @unfilteredmethod
1645 @unfilteredmethod
1646 def destroying(self):
1646 def destroying(self):
1647 '''Inform the repository that nodes are about to be destroyed.
1647 '''Inform the repository that nodes are about to be destroyed.
1648 Intended for use by strip and rollback, so there's a common
1648 Intended for use by strip and rollback, so there's a common
1649 place for anything that has to be done before destroying history.
1649 place for anything that has to be done before destroying history.
1650
1650
1651 This is mostly useful for saving state that is in memory and waiting
1651 This is mostly useful for saving state that is in memory and waiting
1652 to be flushed when the current lock is released. Because a call to
1652 to be flushed when the current lock is released. Because a call to
1653 destroyed is imminent, the repo will be invalidated causing those
1653 destroyed is imminent, the repo will be invalidated causing those
1654 changes to stay in memory (waiting for the next unlock), or vanish
1654 changes to stay in memory (waiting for the next unlock), or vanish
1655 completely.
1655 completely.
1656 '''
1656 '''
1657 # When using the same lock to commit and strip, the phasecache is left
1657 # When using the same lock to commit and strip, the phasecache is left
1658 # dirty after committing. Then when we strip, the repo is invalidated,
1658 # dirty after committing. Then when we strip, the repo is invalidated,
1659 # causing those changes to disappear.
1659 # causing those changes to disappear.
1660 if '_phasecache' in vars(self):
1660 if '_phasecache' in vars(self):
1661 self._phasecache.write()
1661 self._phasecache.write()
1662
1662
1663 @unfilteredmethod
1663 @unfilteredmethod
1664 def destroyed(self):
1664 def destroyed(self):
1665 '''Inform the repository that nodes have been destroyed.
1665 '''Inform the repository that nodes have been destroyed.
1666 Intended for use by strip and rollback, so there's a common
1666 Intended for use by strip and rollback, so there's a common
1667 place for anything that has to be done after destroying history.
1667 place for anything that has to be done after destroying history.
1668 '''
1668 '''
1669 # When one tries to:
1669 # When one tries to:
1670 # 1) destroy nodes thus calling this method (e.g. strip)
1670 # 1) destroy nodes thus calling this method (e.g. strip)
1671 # 2) use phasecache somewhere (e.g. commit)
1671 # 2) use phasecache somewhere (e.g. commit)
1672 #
1672 #
1673 # then 2) will fail because the phasecache contains nodes that were
1673 # then 2) will fail because the phasecache contains nodes that were
1674 # removed. We can either remove phasecache from the filecache,
1674 # removed. We can either remove phasecache from the filecache,
1675 # causing it to reload next time it is accessed, or simply filter
1675 # causing it to reload next time it is accessed, or simply filter
1676 # the removed nodes now and write the updated cache.
1676 # the removed nodes now and write the updated cache.
1677 self._phasecache.filterunknown(self)
1677 self._phasecache.filterunknown(self)
1678 self._phasecache.write()
1678 self._phasecache.write()
1679
1679
1680 # update the 'served' branch cache to help read only server process
1680 # update the 'served' branch cache to help read only server process
1681 # Thanks to branchcache collaboration this is done from the nearest
1681 # Thanks to branchcache collaboration this is done from the nearest
1682 # filtered subset and it is expected to be fast.
1682 # filtered subset and it is expected to be fast.
1683 branchmap.updatecache(self.filtered('served'))
1683 branchmap.updatecache(self.filtered('served'))
1684
1684
1685 # Ensure the persistent tag cache is updated. Doing it now
1685 # Ensure the persistent tag cache is updated. Doing it now
1686 # means that the tag cache only has to worry about destroyed
1686 # means that the tag cache only has to worry about destroyed
1687 # heads immediately after a strip/rollback. That in turn
1687 # heads immediately after a strip/rollback. That in turn
1688 # guarantees that "cachetip == currenttip" (comparing both rev
1688 # guarantees that "cachetip == currenttip" (comparing both rev
1689 # and node) always means no nodes have been added or destroyed.
1689 # and node) always means no nodes have been added or destroyed.
1690
1690
1691 # XXX this is suboptimal when qrefresh'ing: we strip the current
1691 # XXX this is suboptimal when qrefresh'ing: we strip the current
1692 # head, refresh the tag cache, then immediately add a new head.
1692 # head, refresh the tag cache, then immediately add a new head.
1693 # But I think doing it this way is necessary for the "instant
1693 # But I think doing it this way is necessary for the "instant
1694 # tag cache retrieval" case to work.
1694 # tag cache retrieval" case to work.
1695 self.invalidate()
1695 self.invalidate()
1696
1696
1697 def walk(self, match, node=None):
1697 def walk(self, match, node=None):
1698 '''
1698 '''
1699 walk recursively through the directory tree or a given
1699 walk recursively through the directory tree or a given
1700 changeset, finding all files matched by the match
1700 changeset, finding all files matched by the match
1701 function
1701 function
1702 '''
1702 '''
1703 return self[node].walk(match)
1703 return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
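    # Illustrative usage (a sketch, not part of the original module): the
    # returned object behaves like scmutil.status, exposing modified/added/
    # removed (etc.) file lists as attributes.
    #
    #   st = repo.status(unknown=True)
    #   for f in st.modified:
    #       repo.ui.write("M %s\n" % f)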

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
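    # Illustrative usage (a sketch, not part of the original module): list
    # the open heads of the 'default' branch, newest first.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write("%s\n" % short(h))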

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
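    # Note: for each starting node, the loop above follows first parents
    # until it reaches a merge or the root, so each returned tuple is
    # (start, branchpoint, p1-of-branchpoint, p2-of-branchpoint).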

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
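    # Note: for each (top, bottom) pair, the loop above walks first parents
    # from top towards bottom and collects the nodes at exponentially growing
    # distances (1, 2, 4, 8, ...) from top. This keeps the reply small even
    # for long chains, which is what the old wire-protocol discovery that
    # calls this method relies on.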

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()
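    # Illustrative usage (a sketch, not part of the original module): an
    # extension could register a pre-push check roughly like this, e.g.
    # from its reposetup(); the 'toobig' name is hypothetical.
    #
    #   def toobig(repo, remote, outgoing):
    #       if len(outgoing.missing) > 100:
    #           raise util.Abort('refusing to push more than 100 changesets')
    #   repo.prepushoutgoinghooks.add('myext', toobig)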

    def stream_in(self, remote, remotereqs):
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))

        self.applystreamclone(remotereqs, rbranchmap, fp)
        return len(self.heads()) + 1

    def applystreamclone(self, remotereqs, remotebranchmap, fp):
        """Apply stream clone data to this repository.

        "remotereqs" is a set of requirements to handle the incoming data.
        "remotebranchmap" is the result of a branchmap lookup on the remote. It
        can be None.
        "fp" is a file object containing the raw stream data, suitable for
        feeding into exchange.consumestreamclone.
        """
        lock = self.lock()
        try:
            exchange.consumestreamclone(self, fp)

            # new requirements = old non-format requirements +
            #                    new format-related remote requirements
            # requirements from the streamed-in repository
            self.requirements = remotereqs | (
                self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if remotebranchmap:
                rbheads = []
                closed = []
                for bheads in remotebranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(remotebranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
        finally:
            lock.release()

    def clone(self, remote, heads=None, stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
        # avoid a mutable default argument; None stands in for "all heads"
        if heads is None:
            heads = []

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        # internal config: ui.quietbookmarkmove
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret
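    # Illustrative usage (a sketch, not part of the original module):
    # stream-clone from an HTTP peer; assumes mercurial's hg module is
    # imported (it is not imported here, to avoid an import cycle).
    #
    #   other = hg.peer(repo.ui, {}, 'http://hg.example.com/repo')
    #   repo.clone(other, stream=True)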

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
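    # Illustrative usage (a sketch, not part of the original module): move
    # the hypothetical bookmark 'stable' to newnode via the pushkey protocol.
    #
    #   repo.pushkey('bookmarks', 'stable', hex(oldnode), hex(newnode))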

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
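    # Illustrative usage (a sketch, not part of the original module):
    #
    #   marks = repo.listkeys('bookmarks')  # {name: hex node}
    #   for name, hexnode in sorted(marks.items()):
    #       repo.ui.write("%s %s\n" % (name, hexnode))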

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
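    # Note: the returned value is the path to .hg/last-message.txt relative
    # to the current working directory; callers typically echo it in abort
    # hints so an interrupted commit message can be recovered.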

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
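# Note: elsewhere in this module the callback returned by aftertrans() is
# handed to the transaction machinery, so that journal files are renamed to
# their undo counterparts once the transaction closes.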

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
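# Example: undoname('journal.dirstate') -> 'undo.dirstate' (only the first
# occurrence of 'journal' in the basename is replaced).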

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True