scmutil: improve documentation of revset APIs...
Gregory Szorc
r29417:526b027b default
@@ -1,1972 +1,1978 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on the repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
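
# Illustrative note (a sketch, not part of the original file): the cache
# classes and ``unfilteredmethod`` above are used as decorators on
# ``localrepository`` members so cached values always live on the
# unfiltered repo, e.g. (taken from further down in this file):
#
#     @repofilecache('bookmarks', 'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)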

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
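
    # Illustrative usage (a sketch, not from the original source): this is
    # how repoview names are applied, e.g. ``repo.filtered('served')`` in
    # ``localpeer.__init__`` above and ``repo.filtered('visible')`` in
    # ``cancopy()`` below.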

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
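
    # Illustrative lookups (a sketch, not from the original source):
    #   repo[None]    -> workingctx for the working directory
    #   repo['tip']   -> changectx for the tip changeset
    #   repo[0:5]     -> list of changectxs, skipping filtered revisions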

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

-        Return a revset.abstractsmartset, which is a list-like interface
+        Revset aliases from the configuration are not expanded. To expand
+        user aliases, consider calling ``scmutil.revrange()``.
+
+        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

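    # Illustrative calls (a sketch, not part of the original source; the
    # revset strings are examples only):
    #   repo.revs('branch(%s)', 'default')   # %s escapes a string argument
    #   repo.revs('%d::%d', 2, 7)            # %d escapes an integer revision
    # Each call returns a lazily evaluated smartset of integer revisions.
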
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
+
+        Revset aliases from the configuration are not expanded. To expand
+        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

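    # Illustrative iteration (a sketch, not from the original source):
    #   for ctx in repo.set('branch(%s) and not public()', 'default'):
    #       ui.status('%s\n' % ctx.hex())
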
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
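
    # Illustrative call (a sketch): ``_tag()`` below fires the 'pretag' hook
    # this way, aborting the tag if a hook fails because ``throw=True``:
    #   self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)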

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
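
    # Illustrative call (a sketch; the tag name and message are examples):
    #   repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
    #            False, None, None)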

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
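
    # Illustrative usage (a sketch, not from the original source):
    #   for branch, heads in repo.branchmap().iteritems():
    #       ui.write('%s: %d head(s)\n' % (branch, len(heads)))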
806
812
807 @unfilteredmethod
813 @unfilteredmethod
808 def revbranchcache(self):
814 def revbranchcache(self):
809 if not self._revbranchcache:
815 if not self._revbranchcache:
810 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
816 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
811 return self._revbranchcache
817 return self._revbranchcache
812
818
813 def branchtip(self, branch, ignoremissing=False):
819 def branchtip(self, branch, ignoremissing=False):
814 '''return the tip node for a given branch
820 '''return the tip node for a given branch
815
821
816 If ignoremissing is True, then this method will not raise an error.
822 If ignoremissing is True, then this method will not raise an error.
817 This is helpful for callers that only expect None for a missing branch
823 This is helpful for callers that only expect None for a missing branch
818 (e.g. namespace).
824 (e.g. namespace).
819
825
820 '''
826 '''
821 try:
827 try:
822 return self.branchmap().branchtip(branch)
828 return self.branchmap().branchtip(branch)
823 except KeyError:
829 except KeyError:
824 if not ignoremissing:
830 if not ignoremissing:
825 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
831 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
826 else:
832 else:
827 pass
833 pass
828
834
829 def lookup(self, key):
835 def lookup(self, key):
830 return self[key].node()
836 return self[key].node()
831
837
832 def lookupbranch(self, key, remote=None):
838 def lookupbranch(self, key, remote=None):
833 repo = remote or self
839 repo = remote or self
834 if key in repo.branchmap():
840 if key in repo.branchmap():
835 return key
841 return key
836
842
837 repo = (remote and remote.local()) and remote or self
843 repo = (remote and remote.local()) and remote or self
838 return repo[key].branch()
844 return repo[key].branch()
839
845
840 def known(self, nodes):
846 def known(self, nodes):
841 cl = self.changelog
847 cl = self.changelog
842 nm = cl.nodemap
848 nm = cl.nodemap
843 filtered = cl.filteredrevs
849 filtered = cl.filteredrevs
844 result = []
850 result = []
845 for n in nodes:
851 for n in nodes:
846 r = nm.get(n)
852 r = nm.get(n)
847 resp = not (r is None or r in filtered)
853 resp = not (r is None or r in filtered)
848 result.append(resp)
854 result.append(resp)
849 return result
855 return result
850
856
851 def local(self):
857 def local(self):
852 return self
858 return self
853
859
854 def publishing(self):
860 def publishing(self):
855 # it's safe (and desirable) to trust the publish flag unconditionally
861 # it's safe (and desirable) to trust the publish flag unconditionally
856 # so that we don't finalize changes shared between users via ssh or nfs
862 # so that we don't finalize changes shared between users via ssh or nfs
857 return self.ui.configbool('phases', 'publish', True, untrusted=True)
863 return self.ui.configbool('phases', 'publish', True, untrusted=True)
858
864
859 def cancopy(self):
865 def cancopy(self):
860 # so statichttprepo's override of local() works
866 # so statichttprepo's override of local() works
861 if not self.local():
867 if not self.local():
862 return False
868 return False
863 if not self.publishing():
869 if not self.publishing():
864 return True
870 return True
865 # if publishing we can't copy if there is filtered content
871 # if publishing we can't copy if there is filtered content
866 return not self.filtered('visible').changelog.filteredrevs
872 return not self.filtered('visible').changelog.filteredrevs
867
873
868 def shared(self):
874 def shared(self):
869 '''the type of shared repository (None if not shared)'''
875 '''the type of shared repository (None if not shared)'''
870 if self.sharedpath != self.path:
876 if self.sharedpath != self.path:
871 return 'store'
877 return 'store'
872 return None
878 return None
873
879
874 def join(self, f, *insidef):
880 def join(self, f, *insidef):
875 return self.vfs.join(os.path.join(f, *insidef))
881 return self.vfs.join(os.path.join(f, *insidef))
876
882
877 def wjoin(self, f, *insidef):
883 def wjoin(self, f, *insidef):
878 return self.vfs.reljoin(self.root, f, *insidef)
884 return self.vfs.reljoin(self.root, f, *insidef)
879
885
880 def file(self, f):
886 def file(self, f):
881 if f[0] == '/':
887 if f[0] == '/':
882 f = f[1:]
888 f = f[1:]
883 return filelog.filelog(self.svfs, f)
889 return filelog.filelog(self.svfs, f)
884
890
885 def changectx(self, changeid):
891 def changectx(self, changeid):
886 return self[changeid]
892 return self[changeid]
887
893
    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

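    # Usage sketch (illustrative only): a filectx pins a path to a changeset
    # or file revision, e.g.:
    #
    #   fctx = repo.filectx('setup.py', changeid='tip')
    #   data = fctx.data()
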
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

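    # Usage sketch (illustrative only, hypothetical names): an extension can
    # register a named data filter and reference it from an [encode] or
    # [decode] pattern in hgrc; anything after the name is passed to the
    # filter as its params argument:
    #
    #   def upper(s, cmd, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper:', upper)
    #   # hgrc:  [encode]
    #   #        **.txt = upper:
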
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

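    # Usage sketch (illustrative only): wread/wwrite round-trip working
    # directory data through the encode/decode filters, and 'flags' carries
    # 'l' (symlink) and/or 'x' (executable):
    #
    #   data = repo.wread('foo.txt')        # filtered for storage
    #   repo.wwrite('foo.txt', data, '')    # plain file
    #   repo.wwrite('bin/run', data, 'x')   # executable file
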
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This should be explicitly invoked here, because
                # in-memory changes aren't written out at transaction
                # close if tr.addfilegenerator (via dirstate.write or
                # so) isn't invoked while the transaction is running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

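    # Usage sketch (illustrative only, assuming the caller already holds the
    # store lock as required above): nested calls return tr.nest(), and the
    # canonical pattern is close() on success with release() in a finally:
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...            # write to the store through tr
    #           tr.close()     # commit the transaction
    #       finally:
    #           tr.release()   # rolls back unless close() was called
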
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

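    # Usage sketch (illustrative only): recover() is what 'hg recover' calls
    # to roll back a transaction left interrupted in .hg/store/journal; it
    # takes the store lock itself:
    #
    #   if repo.svfs.exists('journal'):
    #       repo.recover()
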
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

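    # Usage sketch (illustrative only): work that must not run while locks
    # are held is deferred with _afterlock; if no lock is currently held the
    # callback runs immediately:
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)
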
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for a non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

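    # Usage sketch (illustrative only): respect the documented lock order,
    # 'wlock' before 'lock', and release in reverse order (release here is
    # this module's alias for lockmod.release):
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       ...  # mutate working directory and store
    #   finally:
    #       release(lock, wlock)
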
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

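    # Illustrative note: for the rename case above, the filelog metadata is a
    # dict of this shape (hypothetical values):
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex-digit source node>'}
    #
    # and the entry is stored with fparent1 == nullid, which tells readers to
    # consult the copy metadata instead of the first parent.
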
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

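    # Usage sketch (illustrative only): commit() acquires its own locks and
    # transaction, so callers just pass the message and metadata; it returns
    # the new changeset node, or None if there was nothing to commit:
    #
    #   node = repo.commit(text='fix parser bug', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
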
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

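    # Editorial sketch (not in the original source): for each (top, bottom)
    # pair, `between` walks first parents from top towards bottom and keeps
    # the nodes at exponentially growing distances 1, 2, 4, 8, ... from top.
    # For a chain top -> a -> b -> c -> d -> bottom it would collect
    # [a, b, d], giving the legacy discovery protocol a thinning sample of
    # the range to search over.
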
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose functions are called with a
        pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

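    # Usage sketch (editorial; the callback and its name are assumed from
    # the docstring above):
    #
    #   def _checkoutgoing(pushop):
    #       if pushop.outgoing.missing:
    #           pushop.repo.ui.note('pushing %d changesets\n'
    #                               % len(pushop.outgoing.missing))
    #
    #   repo.prepushoutgoinghooks.add('myext', _checkoutgoing)
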
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

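# Editorial note (not in the original source): the closure returned by
# aftertrans is handed to the transaction as a post-close callback, so
# journal files are renamed to their undo counterparts (see undoname
# below) only once the transaction has safely completed.
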
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
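
# Wrapping sketch (editorial; 'myext-requirement' is a hypothetical name):
#
#   from mercurial import extensions, localrepo
#
#   def _wrapreqs(orig, repo):
#       reqs = orig(repo)
#       reqs.add('myext-requirement')  # hypothetical custom requirement
#       return reqs
#
#   extensions.wrapfunction(localrepo, 'newreporequirements', _wrapreqs)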
@@ -1,1404 +1,1423 b''
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import contextlib
import errno
import glob
import hashlib
import os
import re
import shutil
import stat
import tempfile
import threading

from .i18n import _
from .node import wdirrev
from . import (
    encoding,
    error,
    match as matchmod,
    osutil,
    pathutil,
    phases,
    revset,
    similar,
    util,
)

if os.name == 'nt':
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)

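# Usage sketch (editorial, not in the original source): repo.status()
# returns an instance of this class, so callers may unpack it as a plain
# tuple or use the named properties:
#
#   st = repo.status(ignored=True)
#   for f in st.modified + st.added:
#       ui.write('changed: %s\n' % f)
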
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision;
                # we have to explicitly exclude it until discovery is
                # cleaned up.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

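# Usage sketch (editorial, not in the original source): code that adds
# files typically creates one auditor per operation and feeds each new
# filename through it:
#
#   audit = casecollisionauditor(ui, False, repo.dirstate)
#   for f in newfiles:
#       audit(f)  # warns (abort=True would raise) on collisions
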
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        key = s.digest()
    return key

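# Validation sketch (editorial; cachedtiprev/cachedtipnode/cachedhash are
# hypothetical values read back from a cache file):
#
#   cl = repo.changelog
#   tiprev = len(cl) - 1
#   if (cachedtiprev == tiprev and cachedtipnode == cl.node(tiprev)
#           and cachedhash == filteredhash(repo, tiprev)):
#       pass  # the cache is still valid for this repoview
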
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False, backgroundclose=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed,
                             backgroundclose=backgroundclose)

    def read(self, path):
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

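    # Usage sketch (editorial, not in the original source): concrete
    # subclasses resolve paths relative to their root, so callers never
    # touch os.path directly:
    #
    #   myvfs = vfs('/repo/.hg')        # vfs is defined later in this file
    #   myvfs.write('foo', 'contents')  # writes /repo/.hg/foo
    #   data = myvfs.tryread('missing') # '' instead of an IOError
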
    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that the path stays relative. This
        exists to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        The checkambig argument is used with util.filestat, and is useful
        only if the destination file is guarded by a lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # the stat of the renamed file is ambiguous with the original
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

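    # Editorial note (not in the original source): "ambiguous" means the
    # renamed file ends up with the same stat data (size/mtime) as the file
    # it replaced, so cached state could not detect the change; advancing
    # st_mtime by one second forces cache validation to see it.
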
    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield a (dirpath, dirs, files) tuple for each directory under path

        ``dirpath`` is relative to the root of this vfs. This uses
        ``os.sep`` as the path separator, even if you specify a POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                vfs._backgroundfilecloser = None

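    # Usage sketch (editorial, based on the docstring above):
    #
    #   with destvfs.backgroundclosing(ui, expectedcount=len(files)):
    #       for f in files:
    #           with destvfs(f, 'wb', backgroundclose=True) as fp:
    #               fp.write(contents[f])
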
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        self.createmode = None
        self._trustnlink = None

    @property
    def mustaudit(self):
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files are not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        The ``checkambig`` argument is passed to atomictempfile (valid
        only for writing), and is useful only if the target file is
        guarded by a lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

opener = vfs

class auditvfs(object):
    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

filteropener = filtervfs

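# Usage sketch (editorial; the lowercasing filter is hypothetical):
#
#   rawvfs = vfs('/path/to/store')
#   lowered = filtervfs(rawvfs, lambda p: p.lower())
#   lowered.write('FOO', 'data')  # actually writes 'foo'
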
659 class readonlyvfs(abstractvfs, auditvfs):
659 class readonlyvfs(abstractvfs, auditvfs):
660 '''Wrapper vfs preventing any writing.'''
660 '''Wrapper vfs preventing any writing.'''
661
661
662 def __init__(self, vfs):
662 def __init__(self, vfs):
663 auditvfs.__init__(self, vfs)
663 auditvfs.__init__(self, vfs)
664
664
665 def __call__(self, path, mode='r', *args, **kw):
665 def __call__(self, path, mode='r', *args, **kw):
666 if mode not in ('r', 'rb'):
666 if mode not in ('r', 'rb'):
667 raise error.Abort(_('this vfs is read only'))
667 raise error.Abort(_('this vfs is read only'))
668 return self.vfs(path, mode, *args, **kw)
668 return self.vfs(path, mode, *args, **kw)
669
669
670 def join(self, path, *insidef):
670 def join(self, path, *insidef):
671 return self.vfs.join(path, *insidef)
671 return self.vfs.join(path, *insidef)
672
672
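# Hedged usage sketch (not part of the original module): composing the
# wrapper vfs classes above. The base vfs instance is an assumption; any
# object with the vfs/opener interface works. filtervfs rewrites the
# filename before delegating, and readonlyvfs rejects write modes.
def _example_wrappedvfs(basevfs):
    lowervfs = filtervfs(basevfs, filter=lambda p: p.lower())
    rovfs = readonlyvfs(lowervfs)
    rovfs('DATA.TXT', 'rb')   # opens 'data.txt' read-only via the base vfs
    return rovfs
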
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag only controls recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

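# Hedged usage sketch (not part of the module): enumerating repositories
# beneath a directory tree. The '/srv/repos' path is illustrative.
def _example_walkrepos():
    found = []
    for repopath in walkrepos('/srv/repos', followsym=True):
        found.append(repopath)      # includes nested mq patch-queue repos
    return found
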
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path

_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath

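# Hedged usage sketch (not part of the module): how HGRCPATH feeds rcpath().
# The paths are illustrative. Note the result is cached in _rcpath, so the
# environment must be set before the first call.
def _example_rcpath():
    os.environ['HGRCPATH'] = os.pathsep.join(
        ['/etc/mercurial/extra.d',   # a directory: expands to its *.rc files
         '/home/user/hgrc.local'])   # a plain file: used as-is
    return rcpath()
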
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operations"""
    if rev is None:
        return wdirrev
    return rev

def revsingle(repo, revspec, default='.'):
    """Return the changectx of the last revision matched by ``revspec``,
    or ``repo[default]`` when the spec is empty."""
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

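# Hedged usage sketch (not part of the module): resolving a single
# user-supplied revision; the 'tip' spec is illustrative.
def _example_revsingle(repo):
    ctx = revsingle(repo, 'tip')                 # last match wins
    fallback = revsingle(repo, '', default='.')  # empty spec -> repo['.']
    return ctx.rev(), fallback.rev()
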
def _pairspec(revspec):
    tree = revset.parse(revspec)
    tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

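# Hedged usage sketch (not part of the module): revpair() backs two-revision
# commands such as 'hg diff -r A -r B'. The revset strings are illustrative.
def _example_revpair(repo):
    node1, node2 = revpair(repo, ['.^', '.'])   # an explicit pair of specs
    single, none = revpair(repo, ['.'])         # collapses to (node, None)
    assert none is None
    return node1, node2, single
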
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revset.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)

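# Hedged usage sketch (not part of the module): combining untrusted input
# with revrange() as the docstring above suggests. The branch name and the
# helper itself are illustrative; formatspec() does the quoting/escaping.
def _example_revrange(repo, branchname):
    spec = revset.formatspec('branch(%s) and not public()', branchname)
    # integers pass through directly; revrange() wraps them in rev(%d)
    for rev in revrange(repo, [spec, 0]):
        repo.ui.write('%d\n' % rev)
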
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        return []
    return parents

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

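# Hedged usage sketch (not part of the module): on Windows
# (util.expandglobs is True) bare patterns are globbed against the cwd,
# while 'kind:' prefixed patterns pass through untouched.
def _example_expandpats():
    return expandpats(['*.py', 'glob:*.txt'])
    # e.g. ['a.py', 'b.py', 'glob:*.txt'] on Windows; the input list is
    # returned unchanged on POSIX, where the shell already expanded globs
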
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)

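# Hedged usage sketch (not part of the module): building matchers against
# the working context. The patterns and filenames are illustrative.
def _example_matchers(repo):
    m = match(repo[None], pats=['glob:*.py'], opts={'include': ['src']})
    exact = matchfiles(repo, ['README', 'setup.py'])
    # matchers are callables: m(f) reports whether f is matched
    return [f for f in exact.files() if m(f)]
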
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user-defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    filepathfromroot = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)

    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath + ".orig"

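# Hedged usage sketch (not part of the module): with
#   [ui]
#   origbackuppath = .hg/origbackups
# in the hgrc, backups move under that directory. The file path below is
# illustrative.
def _example_origpath(ui, repo):
    return origpath(ui, repo, repo.wjoin('src/module.py'))
    # -> <repo>/.hg/origbackups/src/module.py.orig when configured,
    #    otherwise <repo>/src/module.py.orig
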
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. Files are relative
    to the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    various reasons, dst may not actually end up marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements

def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

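# Hedged usage sketch (not part of the module): round-tripping the requires
# file through these helpers. The opener and the feature names are
# illustrative; real callers pass the repo's .hg vfs.
def _example_requires(hgvfs):
    writerequires(hgvfs, set(['revlogv1', 'store', 'fncache']))
    # raises error.RequirementError if the file lists an unknown feature
    return readrequires(hgvfs, set(['revlogv1', 'store', 'fncache',
                                    'dotencode']))
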
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell us
    if a file has been replaced. If it can't, we fall back to recreating the
    object on every call (essentially the same behavior as propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)

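# Hedged usage sketch (not part of the module): a minimal consumer of the
# filecache decorator. The class, its _filecache dict, and the 'bookmarks'
# filename are illustrative; localrepo wires this up the same way via its
# repofilecache subclass.
class _examplecacheduser(object):
    def __init__(self, root):
        self._root = root
        self._filecache = {}   # the decorator stores its stat entries here

    def join(self, fname):
        # the default filecache.join() delegates here to build the real path
        return os.path.join(self._root, fname)

    @filecache('bookmarks')
    def bookmarks(self):
        # recomputed only when <root>/bookmarks is replaced or appended to
        return util.readfile(self.join('bookmarks'))
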
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

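# Hedged usage sketch (not part of the module): spawning a child command
# that re-enters the working directory lock. The command string is
# illustrative.
def _example_wlocksub(repo):
    with repo.wlock():
        # the child process sees HG_WLOCK_LOCKER and may inherit the wlock
        return wlocksub(repo, 'hg status --quiet')
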
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta', False)
            or ui.configbool('format', 'usegeneraldelta', True))

def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)

class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)

class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow it to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than the lifetime of the context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch errors or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
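
# Hedged usage sketch (not part of the module): driving the closer directly.
# The ui object, vfs and paths are illustrative; the real vfs layer wraps
# handles in delayclosedfile so that fh.close() routes through closer.close().
def _example_backgroundclose(ui, openervfs, paths):
    with backgroundfilecloser(ui, expectedcount=len(paths)) as closer:
        for p in paths:
            fh = openervfs(p, 'rb')
            fh.read()
            # queue the close on a worker thread instead of blocking here
            delayclosedfile(fh, closer).close()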