localrepo: ensure transaction id is fully bytes on py3
Augie Fackler
r31508:590319c0 default
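
The only change visible in the excerpt below is the addition of the `pycompat` import; the transaction-id fix itself lands later in the file, past the end of this excerpt. As a rough sketch of the Python 3 problem being addressed (illustrative only: the `idbase`/`txnid` names follow the 4.1-era `transaction()` code, and the exact fix line is not shown here):

    import hashlib
    import random
    import time

    # Illustration of the py2/py3 gap: on Python 2, str is bytes and can be
    # hashed directly; on Python 3, hashlib.sha1() raises TypeError for str,
    # so the id material must be encoded to bytes first. mercurial.pycompat
    # provides helpers for exactly this kind of coercion, hence the new import.
    idbase = "%.40f#%f" % (random.random(), time.time())
    ha = hashlib.sha1(idbase.encode('ascii')).hexdigest()
    txnid = 'TXN:' + ha  # Mercurial itself keeps the whole id as bytes
    print(txnid)
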
@@ -1,2083 +1,2087 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 repoview,
53 repoview,
53 revset,
54 revset,
54 revsetlang,
55 revsetlang,
55 scmutil,
56 scmutil,
56 store,
57 store,
57 subrepo,
58 subrepo,
58 tags as tagsmod,
59 tags as tagsmod,
59 transaction,
60 transaction,
60 txnutil,
61 txnutil,
61 util,
62 util,
62 vfs as vfsmod,
63 vfs as vfsmod,
63 )
64 )
64
65
65 release = lockmod.release
66 release = lockmod.release
66 urlerr = util.urlerr
67 urlerr = util.urlerr
67 urlreq = util.urlreq
68 urlreq = util.urlreq
68
69
69 class repofilecache(scmutil.filecache):
70 class repofilecache(scmutil.filecache):
70 """All filecache usage on repo are done for logic that should be unfiltered
71 """All filecache usage on repo are done for logic that should be unfiltered
71 """
72 """
72
73
73 def join(self, obj, fname):
74 def join(self, obj, fname):
74 return obj.vfs.join(fname)
75 return obj.vfs.join(fname)
75 def __get__(self, repo, type=None):
76 def __get__(self, repo, type=None):
76 if repo is None:
77 if repo is None:
77 return self
78 return self
78 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 def __set__(self, repo, value):
80 def __set__(self, repo, value):
80 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 def __delete__(self, repo):
82 def __delete__(self, repo):
82 return super(repofilecache, self).__delete__(repo.unfiltered())
83 return super(repofilecache, self).__delete__(repo.unfiltered())
83
84
84 class storecache(repofilecache):
85 class storecache(repofilecache):
85 """filecache for files in the store"""
86 """filecache for files in the store"""
86 def join(self, obj, fname):
87 def join(self, obj, fname):
87 return obj.sjoin(fname)
88 return obj.sjoin(fname)
88
89
89 class unfilteredpropertycache(util.propertycache):
90 class unfilteredpropertycache(util.propertycache):
90 """propertycache that apply to unfiltered repo only"""
91 """propertycache that apply to unfiltered repo only"""
91
92
92 def __get__(self, repo, type=None):
93 def __get__(self, repo, type=None):
93 unfi = repo.unfiltered()
94 unfi = repo.unfiltered()
94 if unfi is repo:
95 if unfi is repo:
95 return super(unfilteredpropertycache, self).__get__(unfi)
96 return super(unfilteredpropertycache, self).__get__(unfi)
96 return getattr(unfi, self.name)
97 return getattr(unfi, self.name)
97
98
98 class filteredpropertycache(util.propertycache):
99 class filteredpropertycache(util.propertycache):
99 """propertycache that must take filtering in account"""
100 """propertycache that must take filtering in account"""
100
101
101 def cachevalue(self, obj, value):
102 def cachevalue(self, obj, value):
102 object.__setattr__(obj, self.name, value)
103 object.__setattr__(obj, self.name, value)
103
104
104
105
105 def hasunfilteredcache(repo, name):
106 def hasunfilteredcache(repo, name):
106 """check if a repo has an unfilteredpropertycache value for <name>"""
107 """check if a repo has an unfilteredpropertycache value for <name>"""
107 return name in vars(repo.unfiltered())
108 return name in vars(repo.unfiltered())
108
109
109 def unfilteredmethod(orig):
110 def unfilteredmethod(orig):
110 """decorate method that always need to be run on unfiltered version"""
111 """decorate method that always need to be run on unfiltered version"""
111 def wrapper(repo, *args, **kwargs):
112 def wrapper(repo, *args, **kwargs):
112 return orig(repo.unfiltered(), *args, **kwargs)
113 return orig(repo.unfiltered(), *args, **kwargs)
113 return wrapper
114 return wrapper
114
115
115 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 'unbundle'))
117 'unbundle'))
117 legacycaps = moderncaps.union(set(['changegroupsubset']))
118 legacycaps = moderncaps.union(set(['changegroupsubset']))
118
119
119 class localpeer(peer.peerrepository):
120 class localpeer(peer.peerrepository):
120 '''peer for a local repo; reflects only the most recent API'''
121 '''peer for a local repo; reflects only the most recent API'''
121
122
122 def __init__(self, repo, caps=None):
123 def __init__(self, repo, caps=None):
123 if caps is None:
124 if caps is None:
124 caps = moderncaps.copy()
125 caps = moderncaps.copy()
125 peer.peerrepository.__init__(self)
126 peer.peerrepository.__init__(self)
126 self._repo = repo.filtered('served')
127 self._repo = repo.filtered('served')
127 self.ui = repo.ui
128 self.ui = repo.ui
128 self._caps = repo._restrictcapabilities(caps)
129 self._caps = repo._restrictcapabilities(caps)
129 self.requirements = repo.requirements
130 self.requirements = repo.requirements
130 self.supportedformats = repo.supportedformats
131 self.supportedformats = repo.supportedformats
131
132
132 def close(self):
133 def close(self):
133 self._repo.close()
134 self._repo.close()
134
135
135 def _capabilities(self):
136 def _capabilities(self):
136 return self._caps
137 return self._caps
137
138
138 def local(self):
139 def local(self):
139 return self._repo
140 return self._repo
140
141
141 def canpush(self):
142 def canpush(self):
142 return True
143 return True
143
144
144 def url(self):
145 def url(self):
145 return self._repo.url()
146 return self._repo.url()
146
147
147 def lookup(self, key):
148 def lookup(self, key):
148 return self._repo.lookup(key)
149 return self._repo.lookup(key)
149
150
150 def branchmap(self):
151 def branchmap(self):
151 return self._repo.branchmap()
152 return self._repo.branchmap()
152
153
153 def heads(self):
154 def heads(self):
154 return self._repo.heads()
155 return self._repo.heads()
155
156
156 def known(self, nodes):
157 def known(self, nodes):
157 return self._repo.known(nodes)
158 return self._repo.known(nodes)
158
159
159 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
160 **kwargs):
161 **kwargs):
161 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
162 common=common, bundlecaps=bundlecaps,
163 common=common, bundlecaps=bundlecaps,
163 **kwargs)
164 **kwargs)
164 cb = util.chunkbuffer(chunks)
165 cb = util.chunkbuffer(chunks)
165
166
166 if bundlecaps is not None and 'HG20' in bundlecaps:
167 if bundlecaps is not None and 'HG20' in bundlecaps:
167 # When requesting a bundle2, getbundle returns a stream to make the
168 # When requesting a bundle2, getbundle returns a stream to make the
168 # wire level function happier. We need to build a proper object
169 # wire level function happier. We need to build a proper object
169 # from it in local peer.
170 # from it in local peer.
170 return bundle2.getunbundler(self.ui, cb)
171 return bundle2.getunbundler(self.ui, cb)
171 else:
172 else:
172 return changegroup.getunbundler('01', cb, None)
173 return changegroup.getunbundler('01', cb, None)
173
174
174 # TODO We might want to move the next two calls into legacypeer and add
175 # TODO We might want to move the next two calls into legacypeer and add
175 # unbundle instead.
176 # unbundle instead.
176
177
177 def unbundle(self, cg, heads, url):
178 def unbundle(self, cg, heads, url):
178 """apply a bundle on a repo
179 """apply a bundle on a repo
179
180
180 This function handles the repo locking itself."""
181 This function handles the repo locking itself."""
181 try:
182 try:
182 try:
183 try:
183 cg = exchange.readbundle(self.ui, cg, None)
184 cg = exchange.readbundle(self.ui, cg, None)
184 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
185 if util.safehasattr(ret, 'getchunks'):
186 if util.safehasattr(ret, 'getchunks'):
186 # This is a bundle20 object, turn it into an unbundler.
187 # This is a bundle20 object, turn it into an unbundler.
187 # This little dance should be dropped eventually when the
188 # This little dance should be dropped eventually when the
188 # API is finally improved.
189 # API is finally improved.
189 stream = util.chunkbuffer(ret.getchunks())
190 stream = util.chunkbuffer(ret.getchunks())
190 ret = bundle2.getunbundler(self.ui, stream)
191 ret = bundle2.getunbundler(self.ui, stream)
191 return ret
192 return ret
192 except Exception as exc:
193 except Exception as exc:
193 # If the exception contains output salvaged from a bundle2
194 # If the exception contains output salvaged from a bundle2
194 # reply, we need to make sure it is printed before continuing
195 # reply, we need to make sure it is printed before continuing
195 # to fail. So we build a bundle2 with such output and consume
196 # to fail. So we build a bundle2 with such output and consume
196 # it directly.
197 # it directly.
197 #
198 #
198 # This is not very elegant but allows a "simple" solution for
199 # This is not very elegant but allows a "simple" solution for
199 # issue4594
200 # issue4594
200 output = getattr(exc, '_bundle2salvagedoutput', ())
201 output = getattr(exc, '_bundle2salvagedoutput', ())
201 if output:
202 if output:
202 bundler = bundle2.bundle20(self._repo.ui)
203 bundler = bundle2.bundle20(self._repo.ui)
203 for out in output:
204 for out in output:
204 bundler.addpart(out)
205 bundler.addpart(out)
205 stream = util.chunkbuffer(bundler.getchunks())
206 stream = util.chunkbuffer(bundler.getchunks())
206 b = bundle2.getunbundler(self.ui, stream)
207 b = bundle2.getunbundler(self.ui, stream)
207 bundle2.processbundle(self._repo, b)
208 bundle2.processbundle(self._repo, b)
208 raise
209 raise
209 except error.PushRaced as exc:
210 except error.PushRaced as exc:
210 raise error.ResponseError(_('push failed:'), str(exc))
211 raise error.ResponseError(_('push failed:'), str(exc))
211
212
212 def lock(self):
213 def lock(self):
213 return self._repo.lock()
214 return self._repo.lock()
214
215
215 def addchangegroup(self, cg, source, url):
216 def addchangegroup(self, cg, source, url):
216 return cg.apply(self._repo, source, url)
217 return cg.apply(self._repo, source, url)
217
218
218 def pushkey(self, namespace, key, old, new):
219 def pushkey(self, namespace, key, old, new):
219 return self._repo.pushkey(namespace, key, old, new)
220 return self._repo.pushkey(namespace, key, old, new)
220
221
221 def listkeys(self, namespace):
222 def listkeys(self, namespace):
222 return self._repo.listkeys(namespace)
223 return self._repo.listkeys(namespace)
223
224
224 def debugwireargs(self, one, two, three=None, four=None, five=None):
225 def debugwireargs(self, one, two, three=None, four=None, five=None):
225 '''used to test argument passing over the wire'''
226 '''used to test argument passing over the wire'''
226 return "%s %s %s %s %s" % (one, two, three, four, five)
227 return "%s %s %s %s %s" % (one, two, three, four, five)
227
228
228 class locallegacypeer(localpeer):
229 class locallegacypeer(localpeer):
229 '''peer extension which implements legacy methods too; used for tests with
230 '''peer extension which implements legacy methods too; used for tests with
230 restricted capabilities'''
231 restricted capabilities'''
231
232
232 def __init__(self, repo):
233 def __init__(self, repo):
233 localpeer.__init__(self, repo, caps=legacycaps)
234 localpeer.__init__(self, repo, caps=legacycaps)
234
235
235 def branches(self, nodes):
236 def branches(self, nodes):
236 return self._repo.branches(nodes)
237 return self._repo.branches(nodes)
237
238
238 def between(self, pairs):
239 def between(self, pairs):
239 return self._repo.between(pairs)
240 return self._repo.between(pairs)
240
241
241 def changegroup(self, basenodes, source):
242 def changegroup(self, basenodes, source):
242 return changegroup.changegroup(self._repo, basenodes, source)
243 return changegroup.changegroup(self._repo, basenodes, source)
243
244
244 def changegroupsubset(self, bases, heads, source):
245 def changegroupsubset(self, bases, heads, source):
245 return changegroup.changegroupsubset(self._repo, bases, heads, source)
246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
246
247
247 class localrepository(object):
248 class localrepository(object):
248
249
249 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
250 'manifestv2'))
251 'manifestv2'))
251 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
252 'relshared', 'dotencode'))
253 'relshared', 'dotencode'))
253 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
254 filtername = None
255 filtername = None
255
256
256 # a list of (ui, featureset) functions.
257 # a list of (ui, featureset) functions.
257 # only functions defined in module of enabled extensions are invoked
258 # only functions defined in module of enabled extensions are invoked
258 featuresetupfuncs = set()
259 featuresetupfuncs = set()
259
260
260 def __init__(self, baseui, path, create=False):
261 def __init__(self, baseui, path, create=False):
261 self.requirements = set()
262 self.requirements = set()
262 # vfs to access the working copy
263 # vfs to access the working copy
263 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
264 # vfs to access the content of the repository
265 # vfs to access the content of the repository
265 self.vfs = None
266 self.vfs = None
266 # vfs to access the store part of the repository
267 # vfs to access the store part of the repository
267 self.svfs = None
268 self.svfs = None
268 self.root = self.wvfs.base
269 self.root = self.wvfs.base
269 self.path = self.wvfs.join(".hg")
270 self.path = self.wvfs.join(".hg")
270 self.origroot = path
271 self.origroot = path
271 self.auditor = pathutil.pathauditor(self.root, self._checknested)
272 self.auditor = pathutil.pathauditor(self.root, self._checknested)
272 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
273 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
273 realfs=False)
274 realfs=False)
274 self.vfs = vfsmod.vfs(self.path)
275 self.vfs = vfsmod.vfs(self.path)
275 self.baseui = baseui
276 self.baseui = baseui
276 self.ui = baseui.copy()
277 self.ui = baseui.copy()
277 self.ui.copy = baseui.copy # prevent copying repo configuration
278 self.ui.copy = baseui.copy # prevent copying repo configuration
278 # A list of callback to shape the phase if no data were found.
279 # A list of callback to shape the phase if no data were found.
279 # Callback are in the form: func(repo, roots) --> processed root.
280 # Callback are in the form: func(repo, roots) --> processed root.
280 # This list it to be filled by extension during repo setup
281 # This list it to be filled by extension during repo setup
281 self._phasedefaults = []
282 self._phasedefaults = []
282 try:
283 try:
283 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
284 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
284 self._loadextensions()
285 self._loadextensions()
285 except IOError:
286 except IOError:
286 pass
287 pass
287
288
288 if self.featuresetupfuncs:
289 if self.featuresetupfuncs:
289 self.supported = set(self._basesupported) # use private copy
290 self.supported = set(self._basesupported) # use private copy
290 extmods = set(m.__name__ for n, m
291 extmods = set(m.__name__ for n, m
291 in extensions.extensions(self.ui))
292 in extensions.extensions(self.ui))
292 for setupfunc in self.featuresetupfuncs:
293 for setupfunc in self.featuresetupfuncs:
293 if setupfunc.__module__ in extmods:
294 if setupfunc.__module__ in extmods:
294 setupfunc(self.ui, self.supported)
295 setupfunc(self.ui, self.supported)
295 else:
296 else:
296 self.supported = self._basesupported
297 self.supported = self._basesupported
297 color.setup(self.ui)
298 color.setup(self.ui)
298
299
299 # Add compression engines.
300 # Add compression engines.
300 for name in util.compengines:
301 for name in util.compengines:
301 engine = util.compengines[name]
302 engine = util.compengines[name]
302 if engine.revlogheader():
303 if engine.revlogheader():
303 self.supported.add('exp-compression-%s' % name)
304 self.supported.add('exp-compression-%s' % name)
304
305
305 if not self.vfs.isdir():
306 if not self.vfs.isdir():
306 if create:
307 if create:
307 self.requirements = newreporequirements(self)
308 self.requirements = newreporequirements(self)
308
309
309 if not self.wvfs.exists():
310 if not self.wvfs.exists():
310 self.wvfs.makedirs()
311 self.wvfs.makedirs()
311 self.vfs.makedir(notindexed=True)
312 self.vfs.makedir(notindexed=True)
312
313
313 if 'store' in self.requirements:
314 if 'store' in self.requirements:
314 self.vfs.mkdir("store")
315 self.vfs.mkdir("store")
315
316
316 # create an invalid changelog
317 # create an invalid changelog
317 self.vfs.append(
318 self.vfs.append(
318 "00changelog.i",
319 "00changelog.i",
319 '\0\0\0\2' # represents revlogv2
320 '\0\0\0\2' # represents revlogv2
320 ' dummy changelog to prevent using the old repo layout'
321 ' dummy changelog to prevent using the old repo layout'
321 )
322 )
322 else:
323 else:
323 raise error.RepoError(_("repository %s not found") % path)
324 raise error.RepoError(_("repository %s not found") % path)
324 elif create:
325 elif create:
325 raise error.RepoError(_("repository %s already exists") % path)
326 raise error.RepoError(_("repository %s already exists") % path)
326 else:
327 else:
327 try:
328 try:
328 self.requirements = scmutil.readrequires(
329 self.requirements = scmutil.readrequires(
329 self.vfs, self.supported)
330 self.vfs, self.supported)
330 except IOError as inst:
331 except IOError as inst:
331 if inst.errno != errno.ENOENT:
332 if inst.errno != errno.ENOENT:
332 raise
333 raise
333
334
334 self.sharedpath = self.path
335 self.sharedpath = self.path
335 try:
336 try:
336 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
337 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
337 if 'relshared' in self.requirements:
338 if 'relshared' in self.requirements:
338 sharedpath = self.vfs.join(sharedpath)
339 sharedpath = self.vfs.join(sharedpath)
339 vfs = vfsmod.vfs(sharedpath, realpath=True)
340 vfs = vfsmod.vfs(sharedpath, realpath=True)
340 s = vfs.base
341 s = vfs.base
341 if not vfs.exists():
342 if not vfs.exists():
342 raise error.RepoError(
343 raise error.RepoError(
343 _('.hg/sharedpath points to nonexistent directory %s') % s)
344 _('.hg/sharedpath points to nonexistent directory %s') % s)
344 self.sharedpath = s
345 self.sharedpath = s
345 except IOError as inst:
346 except IOError as inst:
346 if inst.errno != errno.ENOENT:
347 if inst.errno != errno.ENOENT:
347 raise
348 raise
348
349
349 self.store = store.store(
350 self.store = store.store(
350 self.requirements, self.sharedpath, vfsmod.vfs)
351 self.requirements, self.sharedpath, vfsmod.vfs)
351 self.spath = self.store.path
352 self.spath = self.store.path
352 self.svfs = self.store.vfs
353 self.svfs = self.store.vfs
353 self.sjoin = self.store.join
354 self.sjoin = self.store.join
354 self.vfs.createmode = self.store.createmode
355 self.vfs.createmode = self.store.createmode
355 self._applyopenerreqs()
356 self._applyopenerreqs()
356 if create:
357 if create:
357 self._writerequirements()
358 self._writerequirements()
358
359
359 self._dirstatevalidatewarned = False
360 self._dirstatevalidatewarned = False
360
361
361 self._branchcaches = {}
362 self._branchcaches = {}
362 self._revbranchcache = None
363 self._revbranchcache = None
363 self.filterpats = {}
364 self.filterpats = {}
364 self._datafilters = {}
365 self._datafilters = {}
365 self._transref = self._lockref = self._wlockref = None
366 self._transref = self._lockref = self._wlockref = None
366
367
367 # A cache for various files under .hg/ that tracks file changes,
368 # A cache for various files under .hg/ that tracks file changes,
368 # (used by the filecache decorator)
369 # (used by the filecache decorator)
369 #
370 #
370 # Maps a property name to its util.filecacheentry
371 # Maps a property name to its util.filecacheentry
371 self._filecache = {}
372 self._filecache = {}
372
373
373 # hold sets of revision to be filtered
374 # hold sets of revision to be filtered
374 # should be cleared when something might have changed the filter value:
375 # should be cleared when something might have changed the filter value:
375 # - new changesets,
376 # - new changesets,
376 # - phase change,
377 # - phase change,
377 # - new obsolescence marker,
378 # - new obsolescence marker,
378 # - working directory parent change,
379 # - working directory parent change,
379 # - bookmark changes
380 # - bookmark changes
380 self.filteredrevcache = {}
381 self.filteredrevcache = {}
381
382
382 # generic mapping between names and nodes
383 # generic mapping between names and nodes
383 self.names = namespaces.namespaces()
384 self.names = namespaces.namespaces()
384
385
385 @property
386 @property
386 def wopener(self):
387 def wopener(self):
387 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
388 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
388 return self.wvfs
389 return self.wvfs
389
390
390 @property
391 @property
391 def opener(self):
392 def opener(self):
392 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
393 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
393 return self.vfs
394 return self.vfs
394
395
395 def close(self):
396 def close(self):
396 self._writecaches()
397 self._writecaches()
397
398
398 def _loadextensions(self):
399 def _loadextensions(self):
399 extensions.loadall(self.ui)
400 extensions.loadall(self.ui)
400
401
401 def _writecaches(self):
402 def _writecaches(self):
402 if self._revbranchcache:
403 if self._revbranchcache:
403 self._revbranchcache.write()
404 self._revbranchcache.write()
404
405
405 def _restrictcapabilities(self, caps):
406 def _restrictcapabilities(self, caps):
406 if self.ui.configbool('experimental', 'bundle2-advertise', True):
407 if self.ui.configbool('experimental', 'bundle2-advertise', True):
407 caps = set(caps)
408 caps = set(caps)
408 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
409 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
409 caps.add('bundle2=' + urlreq.quote(capsblob))
410 caps.add('bundle2=' + urlreq.quote(capsblob))
410 return caps
411 return caps
411
412
412 def _applyopenerreqs(self):
413 def _applyopenerreqs(self):
413 self.svfs.options = dict((r, 1) for r in self.requirements
414 self.svfs.options = dict((r, 1) for r in self.requirements
414 if r in self.openerreqs)
415 if r in self.openerreqs)
415 # experimental config: format.chunkcachesize
416 # experimental config: format.chunkcachesize
416 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
417 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
417 if chunkcachesize is not None:
418 if chunkcachesize is not None:
418 self.svfs.options['chunkcachesize'] = chunkcachesize
419 self.svfs.options['chunkcachesize'] = chunkcachesize
419 # experimental config: format.maxchainlen
420 # experimental config: format.maxchainlen
420 maxchainlen = self.ui.configint('format', 'maxchainlen')
421 maxchainlen = self.ui.configint('format', 'maxchainlen')
421 if maxchainlen is not None:
422 if maxchainlen is not None:
422 self.svfs.options['maxchainlen'] = maxchainlen
423 self.svfs.options['maxchainlen'] = maxchainlen
423 # experimental config: format.manifestcachesize
424 # experimental config: format.manifestcachesize
424 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
425 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
425 if manifestcachesize is not None:
426 if manifestcachesize is not None:
426 self.svfs.options['manifestcachesize'] = manifestcachesize
427 self.svfs.options['manifestcachesize'] = manifestcachesize
427 # experimental config: format.aggressivemergedeltas
428 # experimental config: format.aggressivemergedeltas
428 aggressivemergedeltas = self.ui.configbool('format',
429 aggressivemergedeltas = self.ui.configbool('format',
429 'aggressivemergedeltas', False)
430 'aggressivemergedeltas', False)
430 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
431 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
431 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
432 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
432
433
433 for r in self.requirements:
434 for r in self.requirements:
434 if r.startswith('exp-compression-'):
435 if r.startswith('exp-compression-'):
435 self.svfs.options['compengine'] = r[len('exp-compression-'):]
436 self.svfs.options['compengine'] = r[len('exp-compression-'):]
436
437
437 def _writerequirements(self):
438 def _writerequirements(self):
438 scmutil.writerequires(self.vfs, self.requirements)
439 scmutil.writerequires(self.vfs, self.requirements)
439
440
440 def _checknested(self, path):
441 def _checknested(self, path):
441 """Determine if path is a legal nested repository."""
442 """Determine if path is a legal nested repository."""
442 if not path.startswith(self.root):
443 if not path.startswith(self.root):
443 return False
444 return False
444 subpath = path[len(self.root) + 1:]
445 subpath = path[len(self.root) + 1:]
445 normsubpath = util.pconvert(subpath)
446 normsubpath = util.pconvert(subpath)
446
447
447 # XXX: Checking against the current working copy is wrong in
448 # XXX: Checking against the current working copy is wrong in
448 # the sense that it can reject things like
449 # the sense that it can reject things like
449 #
450 #
450 # $ hg cat -r 10 sub/x.txt
451 # $ hg cat -r 10 sub/x.txt
451 #
452 #
452 # if sub/ is no longer a subrepository in the working copy
453 # if sub/ is no longer a subrepository in the working copy
453 # parent revision.
454 # parent revision.
454 #
455 #
455 # However, it can of course also allow things that would have
456 # However, it can of course also allow things that would have
456 # been rejected before, such as the above cat command if sub/
457 # been rejected before, such as the above cat command if sub/
457 # is a subrepository now, but was a normal directory before.
458 # is a subrepository now, but was a normal directory before.
458 # The old path auditor would have rejected by mistake since it
459 # The old path auditor would have rejected by mistake since it
459 # panics when it sees sub/.hg/.
460 # panics when it sees sub/.hg/.
460 #
461 #
461 # All in all, checking against the working copy seems sensible
462 # All in all, checking against the working copy seems sensible
462 # since we want to prevent access to nested repositories on
463 # since we want to prevent access to nested repositories on
463 # the filesystem *now*.
464 # the filesystem *now*.
464 ctx = self[None]
465 ctx = self[None]
465 parts = util.splitpath(subpath)
466 parts = util.splitpath(subpath)
466 while parts:
467 while parts:
467 prefix = '/'.join(parts)
468 prefix = '/'.join(parts)
468 if prefix in ctx.substate:
469 if prefix in ctx.substate:
469 if prefix == normsubpath:
470 if prefix == normsubpath:
470 return True
471 return True
471 else:
472 else:
472 sub = ctx.sub(prefix)
473 sub = ctx.sub(prefix)
473 return sub.checknested(subpath[len(prefix) + 1:])
474 return sub.checknested(subpath[len(prefix) + 1:])
474 else:
475 else:
475 parts.pop()
476 parts.pop()
476 return False
477 return False
477
478
478 def peer(self):
479 def peer(self):
479 return localpeer(self) # not cached to avoid reference cycle
480 return localpeer(self) # not cached to avoid reference cycle
480
481
481 def unfiltered(self):
482 def unfiltered(self):
482 """Return unfiltered version of the repository
483 """Return unfiltered version of the repository
483
484
484 Intended to be overwritten by filtered repo."""
485 Intended to be overwritten by filtered repo."""
485 return self
486 return self
486
487
487 def filtered(self, name):
488 def filtered(self, name):
488 """Return a filtered version of a repository"""
489 """Return a filtered version of a repository"""
489 # build a new class with the mixin and the current class
490 # build a new class with the mixin and the current class
490 # (possibly subclass of the repo)
491 # (possibly subclass of the repo)
491 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
492 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
492 pass
493 pass
493 return filteredrepo(self, name)
494 return filteredrepo(self, name)
494
495
495 @repofilecache('bookmarks', 'bookmarks.current')
496 @repofilecache('bookmarks', 'bookmarks.current')
496 def _bookmarks(self):
497 def _bookmarks(self):
497 return bookmarks.bmstore(self)
498 return bookmarks.bmstore(self)
498
499
499 @property
500 @property
500 def _activebookmark(self):
501 def _activebookmark(self):
501 return self._bookmarks.active
502 return self._bookmarks.active
502
503
503 def bookmarkheads(self, bookmark):
504 def bookmarkheads(self, bookmark):
504 name = bookmark.split('@', 1)[0]
505 name = bookmark.split('@', 1)[0]
505 heads = []
506 heads = []
506 for mark, n in self._bookmarks.iteritems():
507 for mark, n in self._bookmarks.iteritems():
507 if mark.split('@', 1)[0] == name:
508 if mark.split('@', 1)[0] == name:
508 heads.append(n)
509 heads.append(n)
509 return heads
510 return heads
510
511
511 # _phaserevs and _phasesets depend on changelog. what we need is to
512 # _phaserevs and _phasesets depend on changelog. what we need is to
512 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
513 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
513 # can't be easily expressed in filecache mechanism.
514 # can't be easily expressed in filecache mechanism.
514 @storecache('phaseroots', '00changelog.i')
515 @storecache('phaseroots', '00changelog.i')
515 def _phasecache(self):
516 def _phasecache(self):
516 return phases.phasecache(self, self._phasedefaults)
517 return phases.phasecache(self, self._phasedefaults)
517
518
518 @storecache('obsstore')
519 @storecache('obsstore')
519 def obsstore(self):
520 def obsstore(self):
520 # read default format for new obsstore.
521 # read default format for new obsstore.
521 # developer config: format.obsstore-version
522 # developer config: format.obsstore-version
522 defaultformat = self.ui.configint('format', 'obsstore-version', None)
523 defaultformat = self.ui.configint('format', 'obsstore-version', None)
523 # rely on obsstore class default when possible.
524 # rely on obsstore class default when possible.
524 kwargs = {}
525 kwargs = {}
525 if defaultformat is not None:
526 if defaultformat is not None:
526 kwargs['defaultformat'] = defaultformat
527 kwargs['defaultformat'] = defaultformat
527 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
528 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
528 store = obsolete.obsstore(self.svfs, readonly=readonly,
529 store = obsolete.obsstore(self.svfs, readonly=readonly,
529 **kwargs)
530 **kwargs)
530 if store and readonly:
531 if store and readonly:
531 self.ui.warn(
532 self.ui.warn(
532 _('obsolete feature not enabled but %i markers found!\n')
533 _('obsolete feature not enabled but %i markers found!\n')
533 % len(list(store)))
534 % len(list(store)))
534 return store
535 return store
535
536
536 @storecache('00changelog.i')
537 @storecache('00changelog.i')
537 def changelog(self):
538 def changelog(self):
538 c = changelog.changelog(self.svfs)
539 c = changelog.changelog(self.svfs)
539 if txnutil.mayhavepending(self.root):
540 if txnutil.mayhavepending(self.root):
540 c.readpending('00changelog.i.a')
541 c.readpending('00changelog.i.a')
541 return c
542 return c
542
543
543 def _constructmanifest(self):
544 def _constructmanifest(self):
544 # This is a temporary function while we migrate from manifest to
545 # This is a temporary function while we migrate from manifest to
545 # manifestlog. It allows bundlerepo and unionrepo to intercept the
546 # manifestlog. It allows bundlerepo and unionrepo to intercept the
546 # manifest creation.
547 # manifest creation.
547 return manifest.manifestrevlog(self.svfs)
548 return manifest.manifestrevlog(self.svfs)
548
549
549 @storecache('00manifest.i')
550 @storecache('00manifest.i')
550 def manifestlog(self):
551 def manifestlog(self):
551 return manifest.manifestlog(self.svfs, self)
552 return manifest.manifestlog(self.svfs, self)
552
553
553 @repofilecache('dirstate')
554 @repofilecache('dirstate')
554 def dirstate(self):
555 def dirstate(self):
555 return dirstate.dirstate(self.vfs, self.ui, self.root,
556 return dirstate.dirstate(self.vfs, self.ui, self.root,
556 self._dirstatevalidate)
557 self._dirstatevalidate)
557
558
558 def _dirstatevalidate(self, node):
559 def _dirstatevalidate(self, node):
559 try:
560 try:
560 self.changelog.rev(node)
561 self.changelog.rev(node)
561 return node
562 return node
562 except error.LookupError:
563 except error.LookupError:
563 if not self._dirstatevalidatewarned:
564 if not self._dirstatevalidatewarned:
564 self._dirstatevalidatewarned = True
565 self._dirstatevalidatewarned = True
565 self.ui.warn(_("warning: ignoring unknown"
566 self.ui.warn(_("warning: ignoring unknown"
566 " working parent %s!\n") % short(node))
567 " working parent %s!\n") % short(node))
567 return nullid
568 return nullid
568
569
569 def __getitem__(self, changeid):
570 def __getitem__(self, changeid):
570 if changeid is None or changeid == wdirrev:
571 if changeid is None or changeid == wdirrev:
571 return context.workingctx(self)
572 return context.workingctx(self)
572 if isinstance(changeid, slice):
573 if isinstance(changeid, slice):
573 return [context.changectx(self, i)
574 return [context.changectx(self, i)
574 for i in xrange(*changeid.indices(len(self)))
575 for i in xrange(*changeid.indices(len(self)))
575 if i not in self.changelog.filteredrevs]
576 if i not in self.changelog.filteredrevs]
576 return context.changectx(self, changeid)
577 return context.changectx(self, changeid)
577
578
578 def __contains__(self, changeid):
579 def __contains__(self, changeid):
579 try:
580 try:
580 self[changeid]
581 self[changeid]
581 return True
582 return True
582 except error.RepoLookupError:
583 except error.RepoLookupError:
583 return False
584 return False
584
585
585 def __nonzero__(self):
586 def __nonzero__(self):
586 return True
587 return True
587
588
588 __bool__ = __nonzero__
589 __bool__ = __nonzero__
589
590
590 def __len__(self):
591 def __len__(self):
591 return len(self.changelog)
592 return len(self.changelog)
592
593
593 def __iter__(self):
594 def __iter__(self):
594 return iter(self.changelog)
595 return iter(self.changelog)
595
596
596 def revs(self, expr, *args):
597 def revs(self, expr, *args):
597 '''Find revisions matching a revset.
598 '''Find revisions matching a revset.
598
599
599 The revset is specified as a string ``expr`` that may contain
600 The revset is specified as a string ``expr`` that may contain
600 %-formatting to escape certain types. See ``revsetlang.formatspec``.
601 %-formatting to escape certain types. See ``revsetlang.formatspec``.
601
602
602 Revset aliases from the configuration are not expanded. To expand
603 Revset aliases from the configuration are not expanded. To expand
603 user aliases, consider calling ``scmutil.revrange()`` or
604 user aliases, consider calling ``scmutil.revrange()`` or
604 ``repo.anyrevs([expr], user=True)``.
605 ``repo.anyrevs([expr], user=True)``.
605
606
606 Returns a revset.abstractsmartset, which is a list-like interface
607 Returns a revset.abstractsmartset, which is a list-like interface
607 that contains integer revisions.
608 that contains integer revisions.
608 '''
609 '''
609 expr = revsetlang.formatspec(expr, *args)
610 expr = revsetlang.formatspec(expr, *args)
610 m = revset.match(None, expr)
611 m = revset.match(None, expr)
611 return m(self)
612 return m(self)
612
613
613 def set(self, expr, *args):
614 def set(self, expr, *args):
614 '''Find revisions matching a revset and emit changectx instances.
615 '''Find revisions matching a revset and emit changectx instances.
615
616
616 This is a convenience wrapper around ``revs()`` that iterates the
617 This is a convenience wrapper around ``revs()`` that iterates the
617 result and is a generator of changectx instances.
618 result and is a generator of changectx instances.
618
619
619 Revset aliases from the configuration are not expanded. To expand
620 Revset aliases from the configuration are not expanded. To expand
620 user aliases, consider calling ``scmutil.revrange()``.
621 user aliases, consider calling ``scmutil.revrange()``.
621 '''
622 '''
622 for r in self.revs(expr, *args):
623 for r in self.revs(expr, *args):
623 yield self[r]
624 yield self[r]
624
625
625 def anyrevs(self, specs, user=False):
626 def anyrevs(self, specs, user=False):
626 '''Find revisions matching one of the given revsets.
627 '''Find revisions matching one of the given revsets.
627
628
628 Revset aliases from the configuration are not expanded by default. To
629 Revset aliases from the configuration are not expanded by default. To
629 expand user aliases, specify ``user=True``.
630 expand user aliases, specify ``user=True``.
630 '''
631 '''
631 if user:
632 if user:
632 m = revset.matchany(self.ui, specs, repo=self)
633 m = revset.matchany(self.ui, specs, repo=self)
633 else:
634 else:
634 m = revset.matchany(None, specs)
635 m = revset.matchany(None, specs)
635 return m(self)
636 return m(self)
636
637
637 def url(self):
638 def url(self):
638 return 'file:' + self.root
639 return 'file:' + self.root
639
640
640 def hook(self, name, throw=False, **args):
641 def hook(self, name, throw=False, **args):
641 """Call a hook, passing this repo instance.
642 """Call a hook, passing this repo instance.
642
643
643 This a convenience method to aid invoking hooks. Extensions likely
644 This a convenience method to aid invoking hooks. Extensions likely
644 won't call this unless they have registered a custom hook or are
645 won't call this unless they have registered a custom hook or are
645 replacing code that is expected to call a hook.
646 replacing code that is expected to call a hook.
646 """
647 """
647 return hook.hook(self.ui, self, name, throw, **args)
648 return hook.hook(self.ui, self, name, throw, **args)
648
649
649 @unfilteredmethod
650 @unfilteredmethod
650 def _tag(self, names, node, message, local, user, date, extra=None,
651 def _tag(self, names, node, message, local, user, date, extra=None,
651 editor=False):
652 editor=False):
652 if isinstance(names, str):
653 if isinstance(names, str):
653 names = (names,)
654 names = (names,)
654
655
655 branches = self.branchmap()
656 branches = self.branchmap()
656 for name in names:
657 for name in names:
657 self.hook('pretag', throw=True, node=hex(node), tag=name,
658 self.hook('pretag', throw=True, node=hex(node), tag=name,
658 local=local)
659 local=local)
659 if name in branches:
660 if name in branches:
660 self.ui.warn(_("warning: tag %s conflicts with existing"
661 self.ui.warn(_("warning: tag %s conflicts with existing"
661 " branch name\n") % name)
662 " branch name\n") % name)
662
663
663 def writetags(fp, names, munge, prevtags):
664 def writetags(fp, names, munge, prevtags):
664 fp.seek(0, 2)
665 fp.seek(0, 2)
665 if prevtags and prevtags[-1] != '\n':
666 if prevtags and prevtags[-1] != '\n':
666 fp.write('\n')
667 fp.write('\n')
667 for name in names:
668 for name in names:
668 if munge:
669 if munge:
669 m = munge(name)
670 m = munge(name)
670 else:
671 else:
671 m = name
672 m = name
672
673
673 if (self._tagscache.tagtypes and
674 if (self._tagscache.tagtypes and
674 name in self._tagscache.tagtypes):
675 name in self._tagscache.tagtypes):
675 old = self.tags().get(name, nullid)
676 old = self.tags().get(name, nullid)
676 fp.write('%s %s\n' % (hex(old), m))
677 fp.write('%s %s\n' % (hex(old), m))
677 fp.write('%s %s\n' % (hex(node), m))
678 fp.write('%s %s\n' % (hex(node), m))
678 fp.close()
679 fp.close()
679
680
680 prevtags = ''
681 prevtags = ''
681 if local:
682 if local:
682 try:
683 try:
683 fp = self.vfs('localtags', 'r+')
684 fp = self.vfs('localtags', 'r+')
684 except IOError:
685 except IOError:
685 fp = self.vfs('localtags', 'a')
686 fp = self.vfs('localtags', 'a')
686 else:
687 else:
687 prevtags = fp.read()
688 prevtags = fp.read()
688
689
689 # local tags are stored in the current charset
690 # local tags are stored in the current charset
690 writetags(fp, names, None, prevtags)
691 writetags(fp, names, None, prevtags)
691 for name in names:
692 for name in names:
692 self.hook('tag', node=hex(node), tag=name, local=local)
693 self.hook('tag', node=hex(node), tag=name, local=local)
693 return
694 return
694
695
695 try:
696 try:
696 fp = self.wvfs('.hgtags', 'rb+')
697 fp = self.wvfs('.hgtags', 'rb+')
697 except IOError as e:
698 except IOError as e:
698 if e.errno != errno.ENOENT:
699 if e.errno != errno.ENOENT:
699 raise
700 raise
700 fp = self.wvfs('.hgtags', 'ab')
701 fp = self.wvfs('.hgtags', 'ab')
701 else:
702 else:
702 prevtags = fp.read()
703 prevtags = fp.read()
703
704
704 # committed tags are stored in UTF-8
705 # committed tags are stored in UTF-8
705 writetags(fp, names, encoding.fromlocal, prevtags)
706 writetags(fp, names, encoding.fromlocal, prevtags)
706
707
707 fp.close()
708 fp.close()
708
709
709 self.invalidatecaches()
710 self.invalidatecaches()
710
711
711 if '.hgtags' not in self.dirstate:
712 if '.hgtags' not in self.dirstate:
712 self[None].add(['.hgtags'])
713 self[None].add(['.hgtags'])
713
714
714 m = matchmod.exact(self.root, '', ['.hgtags'])
715 m = matchmod.exact(self.root, '', ['.hgtags'])
715 tagnode = self.commit(message, user, date, extra=extra, match=m,
716 tagnode = self.commit(message, user, date, extra=extra, match=m,
716 editor=editor)
717 editor=editor)
717
718
718 for name in names:
719 for name in names:
719 self.hook('tag', node=hex(node), tag=name, local=local)
720 self.hook('tag', node=hex(node), tag=name, local=local)
720
721
721 return tagnode
722 return tagnode
722
723
723 def tag(self, names, node, message, local, user, date, editor=False):
724 def tag(self, names, node, message, local, user, date, editor=False):
724 '''tag a revision with one or more symbolic names.
725 '''tag a revision with one or more symbolic names.
725
726
726 names is a list of strings or, when adding a single tag, names may be a
727 names is a list of strings or, when adding a single tag, names may be a
727 string.
728 string.
728
729
729 if local is True, the tags are stored in a per-repository file.
730 if local is True, the tags are stored in a per-repository file.
730 otherwise, they are stored in the .hgtags file, and a new
731 otherwise, they are stored in the .hgtags file, and a new
731 changeset is committed with the change.
732 changeset is committed with the change.
732
733
733 keyword arguments:
734 keyword arguments:
734
735
735 local: whether to store tags in non-version-controlled file
736 local: whether to store tags in non-version-controlled file
736 (default False)
737 (default False)
737
738
738 message: commit message to use if committing
739 message: commit message to use if committing
739
740
740 user: name of user to use if committing
741 user: name of user to use if committing
741
742
742 date: date tuple to use if committing'''
743 date: date tuple to use if committing'''
743
744
744 if not local:
745 if not local:
745 m = matchmod.exact(self.root, '', ['.hgtags'])
746 m = matchmod.exact(self.root, '', ['.hgtags'])
746 if any(self.status(match=m, unknown=True, ignored=True)):
747 if any(self.status(match=m, unknown=True, ignored=True)):
747 raise error.Abort(_('working copy of .hgtags is changed'),
748 raise error.Abort(_('working copy of .hgtags is changed'),
748 hint=_('please commit .hgtags manually'))
749 hint=_('please commit .hgtags manually'))
749
750
750 self.tags() # instantiate the cache
751 self.tags() # instantiate the cache
751 self._tag(names, node, message, local, user, date, editor=editor)
752 self._tag(names, node, message, local, user, date, editor=editor)
752
753
753 @filteredpropertycache
754 @filteredpropertycache
754 def _tagscache(self):
755 def _tagscache(self):
755 '''Returns a tagscache object that contains various tags related
756 '''Returns a tagscache object that contains various tags related
756 caches.'''
757 caches.'''
757
758
758 # This simplifies its cache management by having one decorated
759 # This simplifies its cache management by having one decorated
759 # function (this one) and the rest simply fetch things from it.
760 # function (this one) and the rest simply fetch things from it.
760 class tagscache(object):
761 class tagscache(object):
761 def __init__(self):
762 def __init__(self):
762 # These two define the set of tags for this repository. tags
763 # These two define the set of tags for this repository. tags
763 # maps tag name to node; tagtypes maps tag name to 'global' or
764 # maps tag name to node; tagtypes maps tag name to 'global' or
764 # 'local'. (Global tags are defined by .hgtags across all
765 # 'local'. (Global tags are defined by .hgtags across all
765 # heads, and local tags are defined in .hg/localtags.)
766 # heads, and local tags are defined in .hg/localtags.)
766 # They constitute the in-memory cache of tags.
767 # They constitute the in-memory cache of tags.
767 self.tags = self.tagtypes = None
768 self.tags = self.tagtypes = None
768
769
769 self.nodetagscache = self.tagslist = None
770 self.nodetagscache = self.tagslist = None
770
771
771 cache = tagscache()
772 cache = tagscache()
772 cache.tags, cache.tagtypes = self._findtags()
773 cache.tags, cache.tagtypes = self._findtags()
773
774
774 return cache
775 return cache
775
776
776 def tags(self):
777 def tags(self):
777 '''return a mapping of tag to node'''
778 '''return a mapping of tag to node'''
778 t = {}
779 t = {}
779 if self.changelog.filteredrevs:
780 if self.changelog.filteredrevs:
780 tags, tt = self._findtags()
781 tags, tt = self._findtags()
781 else:
782 else:
782 tags = self._tagscache.tags
783 tags = self._tagscache.tags
783 for k, v in tags.iteritems():
784 for k, v in tags.iteritems():
784 try:
785 try:
785 # ignore tags to unknown nodes
786 # ignore tags to unknown nodes
786 self.changelog.rev(v)
787 self.changelog.rev(v)
787 t[k] = v
788 t[k] = v
788 except (error.LookupError, ValueError):
789 except (error.LookupError, ValueError):
789 pass
790 pass
790 return t
791 return t
791
792
792 def _findtags(self):
793 def _findtags(self):
793 '''Do the hard work of finding tags. Return a pair of dicts
794 '''Do the hard work of finding tags. Return a pair of dicts
794 (tags, tagtypes) where tags maps tag name to node, and tagtypes
795 (tags, tagtypes) where tags maps tag name to node, and tagtypes
795 maps tag name to a string like \'global\' or \'local\'.
796 maps tag name to a string like \'global\' or \'local\'.
796 Subclasses or extensions are free to add their own tags, but
797 Subclasses or extensions are free to add their own tags, but
797 should be aware that the returned dicts will be retained for the
798 should be aware that the returned dicts will be retained for the
798 duration of the localrepo object.'''
799 duration of the localrepo object.'''
799
800
800 # XXX what tagtype should subclasses/extensions use? Currently
801 # XXX what tagtype should subclasses/extensions use? Currently
801 # mq and bookmarks add tags, but do not set the tagtype at all.
802 # mq and bookmarks add tags, but do not set the tagtype at all.
802 # Should each extension invent its own tag type? Should there
803 # Should each extension invent its own tag type? Should there
803 # be one tagtype for all such "virtual" tags? Or is the status
804 # be one tagtype for all such "virtual" tags? Or is the status
804 # quo fine?
805 # quo fine?
805
806
806 alltags = {} # map tag name to (node, hist)
807 alltags = {} # map tag name to (node, hist)
807 tagtypes = {}
808 tagtypes = {}
808
809
809 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
810 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
810 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
811 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
811
812
812 # Build the return dicts. Have to re-encode tag names because
813 # Build the return dicts. Have to re-encode tag names because
813 # the tags module always uses UTF-8 (in order not to lose info
814 # the tags module always uses UTF-8 (in order not to lose info
814 # writing to the cache), but the rest of Mercurial wants them in
815 # writing to the cache), but the rest of Mercurial wants them in
815 # local encoding.
816 # local encoding.
816 tags = {}
817 tags = {}
817 for (name, (node, hist)) in alltags.iteritems():
818 for (name, (node, hist)) in alltags.iteritems():
818 if node != nullid:
819 if node != nullid:
819 tags[encoding.tolocal(name)] = node
820 tags[encoding.tolocal(name)] = node
820 tags['tip'] = self.changelog.tip()
821 tags['tip'] = self.changelog.tip()
821 tagtypes = dict([(encoding.tolocal(name), value)
822 tagtypes = dict([(encoding.tolocal(name), value)
822 for (name, value) in tagtypes.iteritems()])
823 for (name, value) in tagtypes.iteritems()])
823 return (tags, tagtypes)
824 return (tags, tagtypes)
824
825
825 def tagtype(self, tagname):
826 def tagtype(self, tagname):
826 '''
827 '''
827 return the type of the given tag. result can be:
828 return the type of the given tag. result can be:
828
829
829 'local' : a local tag
830 'local' : a local tag
830 'global' : a global tag
831 'global' : a global tag
831 None : tag does not exist
832 None : tag does not exist
832 '''
833 '''
833
834
834 return self._tagscache.tagtypes.get(tagname)
835 return self._tagscache.tagtypes.get(tagname)
835
836
836 def tagslist(self):
837 def tagslist(self):
837 '''return a list of tags ordered by revision'''
838 '''return a list of tags ordered by revision'''
838 if not self._tagscache.tagslist:
839 if not self._tagscache.tagslist:
839 l = []
840 l = []
840 for t, n in self.tags().iteritems():
841 for t, n in self.tags().iteritems():
841 l.append((self.changelog.rev(n), t, n))
842 l.append((self.changelog.rev(n), t, n))
842 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
843 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
843
844
844 return self._tagscache.tagslist
845 return self._tagscache.tagslist
845
846
846 def nodetags(self, node):
847 def nodetags(self, node):
847 '''return the tags associated with a node'''
848 '''return the tags associated with a node'''
848 if not self._tagscache.nodetagscache:
849 if not self._tagscache.nodetagscache:
849 nodetagscache = {}
850 nodetagscache = {}
850 for t, n in self._tagscache.tags.iteritems():
851 for t, n in self._tagscache.tags.iteritems():
851 nodetagscache.setdefault(n, []).append(t)
852 nodetagscache.setdefault(n, []).append(t)
852 for tags in nodetagscache.itervalues():
853 for tags in nodetagscache.itervalues():
853 tags.sort()
854 tags.sort()
854 self._tagscache.nodetagscache = nodetagscache
855 self._tagscache.nodetagscache = nodetagscache
855 return self._tagscache.nodetagscache.get(node, [])
856 return self._tagscache.nodetagscache.get(node, [])
856
857
857 def nodebookmarks(self, node):
858 def nodebookmarks(self, node):
858 """return the list of bookmarks pointing to the specified node"""
859 """return the list of bookmarks pointing to the specified node"""
859 marks = []
860 marks = []
860 for bookmark, n in self._bookmarks.iteritems():
861 for bookmark, n in self._bookmarks.iteritems():
861 if n == node:
862 if n == node:
862 marks.append(bookmark)
863 marks.append(bookmark)
863 return sorted(marks)
864 return sorted(marks)
864
865
865 def branchmap(self):
866 def branchmap(self):
866 '''returns a dictionary {branch: [branchheads]} with branchheads
867 '''returns a dictionary {branch: [branchheads]} with branchheads
867 ordered by increasing revision number'''
868 ordered by increasing revision number'''
868 branchmap.updatecache(self)
869 branchmap.updatecache(self)
869 return self._branchcaches[self.filtername]
870 return self._branchcaches[self.filtername]
870
871
871 @unfilteredmethod
872 @unfilteredmethod
872 def revbranchcache(self):
873 def revbranchcache(self):
873 if not self._revbranchcache:
874 if not self._revbranchcache:
874 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
875 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
875 return self._revbranchcache
876 return self._revbranchcache
876
877
877 def branchtip(self, branch, ignoremissing=False):
878 def branchtip(self, branch, ignoremissing=False):
878 '''return the tip node for a given branch
879 '''return the tip node for a given branch
879
880
880 If ignoremissing is True, then this method will not raise an error.
881 If ignoremissing is True, then this method will not raise an error.
881 This is helpful for callers that only expect None for a missing branch
882 This is helpful for callers that only expect None for a missing branch
882 (e.g. namespace).
883 (e.g. namespace).
883
884
884 '''
885 '''
885 try:
886 try:
886 return self.branchmap().branchtip(branch)
887 return self.branchmap().branchtip(branch)
887 except KeyError:
888 except KeyError:
888 if not ignoremissing:
889 if not ignoremissing:
889 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
890 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
890 else:
891 else:
891 pass
892 pass
892
893
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

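    # Contract sketch for known() (illustrative; the node names are made
    # up): the result is a list of booleans parallel to the input, False
    # for nodes that are unknown or hidden by the current filter:
    #
    #   repo.known([goodnode, missingnode])  # -> [True, False]
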
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

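    # Config sketch (assumption: standard hgrc syntax): publishing() reads
    # the untrusted-readable config bit checked above, which can be turned
    # off per repository with:
    #
    #   [phases]
    #   publish = False
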
    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

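    # Usage sketch (illustrative; 'somenode' is a made-up file node): file()
    # strips a leading '/' and returns the filelog revlog for a tracked path:
    #
    #   flog = repo.file('/foo.txt')   # same as repo.file('foo.txt')
    #   flog.rev(somenode)             # filelog-local revision number
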
    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as
            # it requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
        return self.wvfs(f, mode)

    def _link(self, f):
        self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
                           '4.0')
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

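    # Filter config sketch (assumption: the hgrc [encode]/[decode] filter
    # syntax documented in hgrc(5); the gzip commands are examples only).
    # _loadfilter() above reads sections like these, and a command of '!'
    # disables a pattern:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = gzip
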
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

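    # Flag semantics sketch (illustrative calls, not part of the original
    # source): 'l' writes a symlink whose target is the decoded data, and
    # 'x' sets the executable bit after a regular write:
    #
    #   repo.wwrite('bin/run', '#!/bin/sh\n', 'x')   # executable file
    #   repo.wwrite('link', 'target', 'l')           # symlink to 'target'
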
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hashlib.sha1(idbase).hexdigest()
        if pycompat.ispy3:
            ha = ha.encode('latin1')
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be invoked explicitly here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

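    # Why the latin1 round-trip above (sketch of the py3 semantics this
    # change addresses): on Python 3, hexdigest() returns a unicode str,
    # while hook arguments and the rest of Mercurial traffic in bytes; the
    # digest is pure ASCII hex, so encoding it as latin1 is lossless.
    # Assuming a bytes input to sha1:
    #
    #   >>> import hashlib
    #   >>> ha = hashlib.sha1(b'0.5#1.0').hexdigest()  # str on py3
    #   >>> isinstance(ha.encode('latin1'), bytes)
    #   True
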
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

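    # Naming sketch (assumption: undoname(), defined elsewhere in this
    # module, swaps the 'journal' prefix for 'undo'), so the pairs above
    # map roughly as:
    #
    #   journal           -> undo
    #   journal.dirstate  -> undo.dirstate
    #   journal.branch    -> undo.branch
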
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

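    # CLI view (illustrative): these two methods back the recovery
    # commands; recover() rolls back an interrupted transaction left in
    # .hg/store/journal, while rollback() undoes the last completed
    # transaction recorded in the 'undo' files:
    #
    #   $ hg recover
    #   $ hg rollback --dry-run
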
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but a
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

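    # Timeout sketch (assumption: standard hgrc syntax): the 600-second
    # fallback above can be tuned via the config key read in this method:
    #
    #   [ui]
    #   timeout = 60
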
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

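    # Lock-ordering sketch (illustrative caller code, mirroring the
    # docstrings above and the pattern used by commit() below): take
    # 'wlock' before 'lock', and release in reverse order:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       ...  # mutate the store and working copy
    #   finally:
    #       release(lock, wlock)
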
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

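    # Metadata sketch (illustrative): for a commit of 'bar' recorded as
    # copied from 'foo', _filecommit() stores the copy information in the
    # filelog metadata roughly as:
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-digit hex node of foo>'}
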
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

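    # Usage sketch (illustrative): a minimal programmatic commit through
    # the method above; it returns the new changeset node, or None when
    # there is nothing to commit:
    #
    #   node = repo.commit(text='fix frobnicator', user='alice <a@b.org>')
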
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # Retracting the boundary never alters parent changesets;
                # if a parent already has a higher phase, the resulting
                # phase will be compliant anyway.
                #
                # If the minimal phase was 0, we don't need to retract
                # anything.
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

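    # A minimal sketch (illustration only, not part of this file) of an
    # in-process pretxncommit hook: it fires inside the transaction opened
    # above, so a truthy return value aborts the commit and rolls the
    # transaction back. The module and hook names below are hypothetical:
    #
    #     def rejectemptydesc(ui, repo, hooktype, node=None, **kwargs):
    #         if not repo[node].description().strip():
    #             ui.warn('commit message must not be empty\n')
    #             return True  # abort before tr.close()
    #         return False
    #
    # enabled in hgrc with:
    #     [hooks]
    #     pretxncommit.nonempty = python:myhooks.rejectemptydesc
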
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and
        waiting to be flushed when the current lock is released. Because
        a call to destroyed is imminent, the repo will be invalidated,
        causing those changes either to stay in memory (waiting for the
        next unlock) or to vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is
        # left dirty after committing. Then when we strip, the repo is
        # invalidated, causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # out the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # Update the 'served' branch cache to help read-only server
        # processes. Thanks to branchcache collaboration this is done from
        # the nearest filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

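    # Usage sketch (hypothetical branch name): heads come back newest
    # first, so the tip of a branch, when the branch exists, is simply:
    #
    #     heads = repo.branchheads('default')
    #     tip = heads[0] if heads else None
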
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # For each (top, bottom) pair, walk first parents from top towards
        # bottom and record the nodes found at exponentially growing
        # distances (1, 2, 4, 8, ...), yielding a logarithmic-size sample
        # of each chain.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

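    # A standalone sketch of the sampling pattern above (illustration only;
    # the helper is hypothetical and operates on plain indices):
    #
    #     def sampled(distance):
    #         i, f, picked = 0, 1, []
    #         while i < distance:
    #             if i == f:
    #                 picked.append(i)
    #                 f *= 2
    #             i += 1
    #         return picked
    #
    #     sampled(20)  ->  [1, 2, 4, 8, 16]
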
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a
        pushop (carrying repo, remote and outgoing) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

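    # Usage sketch for the built-in 'bookmarks' pushkey namespace
    # (hypothetical names): values are hex nodes, and old must match the
    # current value for the compare-and-swap to succeed; the boolean
    # result reports that outcome:
    #
    #     ok = repo.pushkey('bookmarks', 'feature-x', oldhex, newhex)
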
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

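    # Note: the message lands in .hg/last-message.txt; the returned path is
    # relative to the current working directory, e.g. (hypothetical layout,
    # cwd at the repo root):
    #
    #     msgfn = repo.savecommitmessage('WIP\n')  # '.hg/last-message.txt'
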
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                # if src and dest refer to the same file, vfs.rename is a
                # no-op, leaving both src and dest on disk. delete dest to
                # make sure the rename cannot be such a no-op.
                vfs.unlink(dest)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

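# For illustration (hypothetical store paths), only the first 'journal'
# component is replaced:
#
#     undoname('.hg/store/journal')            -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
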
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
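
# A minimal sketch of the wrapping the docstring above describes; the
# extension module, config knob and requirement name are hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         if repo.ui.configbool('myext', 'enable', False):
#             reqs.add('exp-myext-feature')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)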