scmutil: allow access to filecache descriptor on class...
Martijn Pieters
r29373:36fbd72c default
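
This changeset adds the standard descriptor-protocol guard to repofilecache.__get__ in the hunk below: when the attribute is looked up on the class rather than on an instance, Python passes None as the instance, and the descriptor now returns itself instead of failing on repo.unfiltered(). That is the same convention the built-in property follows, and (per the commit title) the same guard presumably lands in scmutil.filecache itself; the hunk shown here mirrors it in localrepo's repofilecache subclass. A minimal, self-contained sketch of the convention, using hypothetical names rather than Mercurial code:

# Standalone sketch (hypothetical names): the "return self when accessed
# on the class" idiom the changeset adopts.
class cachedthing(object):
    def __get__(self, obj, type=None):
        if obj is None:
            # Class-level access (e.g. owner.attr): expose the descriptor
            # itself so callers can introspect or wrap it.
            return self
        return 'value computed for %r' % obj

class owner(object):
    attr = cachedthing()

assert isinstance(owner.attr, cachedthing)  # descriptor, no AttributeError
assert owner().attr.startswith('value')     # instance access still computes
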
@@ -1,1970 +1,1972 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
+        if repo is None:
+            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

549 def revs(self, expr, *args):
551 def revs(self, expr, *args):
550 '''Find revisions matching a revset.
552 '''Find revisions matching a revset.
551
553
552 The revset is specified as a string ``expr`` that may contain
554 The revset is specified as a string ``expr`` that may contain
553 %-formatting to escape certain types. See ``revset.formatspec``.
555 %-formatting to escape certain types. See ``revset.formatspec``.
554
556
555 Return a revset.abstractsmartset, which is a list-like interface
557 Return a revset.abstractsmartset, which is a list-like interface
556 that contains integer revisions.
558 that contains integer revisions.
557 '''
559 '''
558 expr = revset.formatspec(expr, *args)
560 expr = revset.formatspec(expr, *args)
559 m = revset.match(None, expr)
561 m = revset.match(None, expr)
560 return m(self)
562 return m(self)
561
563
562 def set(self, expr, *args):
564 def set(self, expr, *args):
563 '''Find revisions matching a revset and emit changectx instances.
565 '''Find revisions matching a revset and emit changectx instances.
564
566
565 This is a convenience wrapper around ``revs()`` that iterates the
567 This is a convenience wrapper around ``revs()`` that iterates the
566 result and is a generator of changectx instances.
568 result and is a generator of changectx instances.
567 '''
569 '''
568 for r in self.revs(expr, *args):
570 for r in self.revs(expr, *args):
569 yield self[r]
571 yield self[r]
570
572
571 def url(self):
573 def url(self):
572 return 'file:' + self.root
574 return 'file:' + self.root
573
575
574 def hook(self, name, throw=False, **args):
576 def hook(self, name, throw=False, **args):
575 """Call a hook, passing this repo instance.
577 """Call a hook, passing this repo instance.
576
578
577 This a convenience method to aid invoking hooks. Extensions likely
579 This a convenience method to aid invoking hooks. Extensions likely
578 won't call this unless they have registered a custom hook or are
580 won't call this unless they have registered a custom hook or are
579 replacing code that is expected to call a hook.
581 replacing code that is expected to call a hook.
580 """
582 """
581 return hook.hook(self.ui, self, name, throw, **args)
583 return hook.hook(self.ui, self, name, throw, **args)
582
584
583 @unfilteredmethod
585 @unfilteredmethod
584 def _tag(self, names, node, message, local, user, date, extra=None,
586 def _tag(self, names, node, message, local, user, date, extra=None,
585 editor=False):
587 editor=False):
586 if isinstance(names, str):
588 if isinstance(names, str):
587 names = (names,)
589 names = (names,)
588
590
589 branches = self.branchmap()
591 branches = self.branchmap()
590 for name in names:
592 for name in names:
591 self.hook('pretag', throw=True, node=hex(node), tag=name,
593 self.hook('pretag', throw=True, node=hex(node), tag=name,
592 local=local)
594 local=local)
593 if name in branches:
595 if name in branches:
594 self.ui.warn(_("warning: tag %s conflicts with existing"
596 self.ui.warn(_("warning: tag %s conflicts with existing"
595 " branch name\n") % name)
597 " branch name\n") % name)
596
598
597 def writetags(fp, names, munge, prevtags):
599 def writetags(fp, names, munge, prevtags):
598 fp.seek(0, 2)
600 fp.seek(0, 2)
599 if prevtags and prevtags[-1] != '\n':
601 if prevtags and prevtags[-1] != '\n':
600 fp.write('\n')
602 fp.write('\n')
601 for name in names:
603 for name in names:
602 if munge:
604 if munge:
603 m = munge(name)
605 m = munge(name)
604 else:
606 else:
605 m = name
607 m = name
606
608
607 if (self._tagscache.tagtypes and
609 if (self._tagscache.tagtypes and
608 name in self._tagscache.tagtypes):
610 name in self._tagscache.tagtypes):
609 old = self.tags().get(name, nullid)
611 old = self.tags().get(name, nullid)
610 fp.write('%s %s\n' % (hex(old), m))
612 fp.write('%s %s\n' % (hex(old), m))
611 fp.write('%s %s\n' % (hex(node), m))
613 fp.write('%s %s\n' % (hex(node), m))
612 fp.close()
614 fp.close()
613
615
614 prevtags = ''
616 prevtags = ''
615 if local:
617 if local:
616 try:
618 try:
617 fp = self.vfs('localtags', 'r+')
619 fp = self.vfs('localtags', 'r+')
618 except IOError:
620 except IOError:
619 fp = self.vfs('localtags', 'a')
621 fp = self.vfs('localtags', 'a')
620 else:
622 else:
621 prevtags = fp.read()
623 prevtags = fp.read()
622
624
623 # local tags are stored in the current charset
625 # local tags are stored in the current charset
624 writetags(fp, names, None, prevtags)
626 writetags(fp, names, None, prevtags)
625 for name in names:
627 for name in names:
626 self.hook('tag', node=hex(node), tag=name, local=local)
628 self.hook('tag', node=hex(node), tag=name, local=local)
627 return
629 return
628
630
629 try:
631 try:
630 fp = self.wfile('.hgtags', 'rb+')
632 fp = self.wfile('.hgtags', 'rb+')
631 except IOError as e:
633 except IOError as e:
632 if e.errno != errno.ENOENT:
634 if e.errno != errno.ENOENT:
633 raise
635 raise
634 fp = self.wfile('.hgtags', 'ab')
636 fp = self.wfile('.hgtags', 'ab')
635 else:
637 else:
636 prevtags = fp.read()
638 prevtags = fp.read()
637
639
638 # committed tags are stored in UTF-8
640 # committed tags are stored in UTF-8
639 writetags(fp, names, encoding.fromlocal, prevtags)
641 writetags(fp, names, encoding.fromlocal, prevtags)
640
642
641 fp.close()
643 fp.close()
642
644
643 self.invalidatecaches()
645 self.invalidatecaches()
644
646
645 if '.hgtags' not in self.dirstate:
647 if '.hgtags' not in self.dirstate:
646 self[None].add(['.hgtags'])
648 self[None].add(['.hgtags'])
647
649
648 m = matchmod.exact(self.root, '', ['.hgtags'])
650 m = matchmod.exact(self.root, '', ['.hgtags'])
649 tagnode = self.commit(message, user, date, extra=extra, match=m,
651 tagnode = self.commit(message, user, date, extra=extra, match=m,
650 editor=editor)
652 editor=editor)
651
653
652 for name in names:
654 for name in names:
653 self.hook('tag', node=hex(node), tag=name, local=local)
655 self.hook('tag', node=hex(node), tag=name, local=local)
654
656
655 return tagnode
657 return tagnode
656
658
657 def tag(self, names, node, message, local, user, date, editor=False):
659 def tag(self, names, node, message, local, user, date, editor=False):
658 '''tag a revision with one or more symbolic names.
660 '''tag a revision with one or more symbolic names.
659
661
660 names is a list of strings or, when adding a single tag, names may be a
662 names is a list of strings or, when adding a single tag, names may be a
661 string.
663 string.
662
664
663 if local is True, the tags are stored in a per-repository file.
665 if local is True, the tags are stored in a per-repository file.
664 otherwise, they are stored in the .hgtags file, and a new
666 otherwise, they are stored in the .hgtags file, and a new
665 changeset is committed with the change.
667 changeset is committed with the change.
666
668
667 keyword arguments:
669 keyword arguments:
668
670
669 local: whether to store tags in non-version-controlled file
671 local: whether to store tags in non-version-controlled file
670 (default False)
672 (default False)
671
673
672 message: commit message to use if committing
674 message: commit message to use if committing
673
675
674 user: name of user to use if committing
676 user: name of user to use if committing
675
677
676 date: date tuple to use if committing'''
678 date: date tuple to use if committing'''
677
679
678 if not local:
680 if not local:
679 m = matchmod.exact(self.root, '', ['.hgtags'])
681 m = matchmod.exact(self.root, '', ['.hgtags'])
680 if any(self.status(match=m, unknown=True, ignored=True)):
682 if any(self.status(match=m, unknown=True, ignored=True)):
681 raise error.Abort(_('working copy of .hgtags is changed'),
683 raise error.Abort(_('working copy of .hgtags is changed'),
682 hint=_('please commit .hgtags manually'))
684 hint=_('please commit .hgtags manually'))
683
685
684 self.tags() # instantiate the cache
686 self.tags() # instantiate the cache
685 self._tag(names, node, message, local, user, date, editor=editor)
687 self._tag(names, node, message, local, user, date, editor=editor)
686
688
687 @filteredpropertycache
689 @filteredpropertycache
688 def _tagscache(self):
690 def _tagscache(self):
689 '''Returns a tagscache object that contains various tags related
691 '''Returns a tagscache object that contains various tags related
690 caches.'''
692 caches.'''
691
693
692 # This simplifies its cache management by having one decorated
694 # This simplifies its cache management by having one decorated
693 # function (this one) and the rest simply fetch things from it.
695 # function (this one) and the rest simply fetch things from it.
694 class tagscache(object):
696 class tagscache(object):
695 def __init__(self):
697 def __init__(self):
696 # These two define the set of tags for this repository. tags
698 # These two define the set of tags for this repository. tags
697 # maps tag name to node; tagtypes maps tag name to 'global' or
699 # maps tag name to node; tagtypes maps tag name to 'global' or
698 # 'local'. (Global tags are defined by .hgtags across all
700 # 'local'. (Global tags are defined by .hgtags across all
699 # heads, and local tags are defined in .hg/localtags.)
701 # heads, and local tags are defined in .hg/localtags.)
700 # They constitute the in-memory cache of tags.
702 # They constitute the in-memory cache of tags.
701 self.tags = self.tagtypes = None
703 self.tags = self.tagtypes = None
702
704
703 self.nodetagscache = self.tagslist = None
705 self.nodetagscache = self.tagslist = None
704
706
705 cache = tagscache()
707 cache = tagscache()
706 cache.tags, cache.tagtypes = self._findtags()
708 cache.tags, cache.tagtypes = self._findtags()
707
709
708 return cache
710 return cache
709
711
710 def tags(self):
712 def tags(self):
711 '''return a mapping of tag to node'''
713 '''return a mapping of tag to node'''
712 t = {}
714 t = {}
713 if self.changelog.filteredrevs:
715 if self.changelog.filteredrevs:
714 tags, tt = self._findtags()
716 tags, tt = self._findtags()
715 else:
717 else:
716 tags = self._tagscache.tags
718 tags = self._tagscache.tags
717 for k, v in tags.iteritems():
719 for k, v in tags.iteritems():
718 try:
720 try:
719 # ignore tags to unknown nodes
721 # ignore tags to unknown nodes
720 self.changelog.rev(v)
722 self.changelog.rev(v)
721 t[k] = v
723 t[k] = v
722 except (error.LookupError, ValueError):
724 except (error.LookupError, ValueError):
723 pass
725 pass
724 return t
726 return t
725
727
726 def _findtags(self):
728 def _findtags(self):
727 '''Do the hard work of finding tags. Return a pair of dicts
729 '''Do the hard work of finding tags. Return a pair of dicts
728 (tags, tagtypes) where tags maps tag name to node, and tagtypes
730 (tags, tagtypes) where tags maps tag name to node, and tagtypes
729 maps tag name to a string like \'global\' or \'local\'.
731 maps tag name to a string like \'global\' or \'local\'.
730 Subclasses or extensions are free to add their own tags, but
732 Subclasses or extensions are free to add their own tags, but
731 should be aware that the returned dicts will be retained for the
733 should be aware that the returned dicts will be retained for the
732 duration of the localrepo object.'''
734 duration of the localrepo object.'''
733
735
734 # XXX what tagtype should subclasses/extensions use? Currently
736 # XXX what tagtype should subclasses/extensions use? Currently
735 # mq and bookmarks add tags, but do not set the tagtype at all.
737 # mq and bookmarks add tags, but do not set the tagtype at all.
736 # Should each extension invent its own tag type? Should there
738 # Should each extension invent its own tag type? Should there
737 # be one tagtype for all such "virtual" tags? Or is the status
739 # be one tagtype for all such "virtual" tags? Or is the status
738 # quo fine?
740 # quo fine?
739
741
740 alltags = {} # map tag name to (node, hist)
742 alltags = {} # map tag name to (node, hist)
741 tagtypes = {}
743 tagtypes = {}
742
744
743 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
745 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
744 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
746 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
745
747
746 # Build the return dicts. Have to re-encode tag names because
748 # Build the return dicts. Have to re-encode tag names because
747 # the tags module always uses UTF-8 (in order not to lose info
749 # the tags module always uses UTF-8 (in order not to lose info
748 # writing to the cache), but the rest of Mercurial wants them in
750 # writing to the cache), but the rest of Mercurial wants them in
749 # local encoding.
751 # local encoding.
750 tags = {}
752 tags = {}
751 for (name, (node, hist)) in alltags.iteritems():
753 for (name, (node, hist)) in alltags.iteritems():
752 if node != nullid:
754 if node != nullid:
753 tags[encoding.tolocal(name)] = node
755 tags[encoding.tolocal(name)] = node
754 tags['tip'] = self.changelog.tip()
756 tags['tip'] = self.changelog.tip()
755 tagtypes = dict([(encoding.tolocal(name), value)
757 tagtypes = dict([(encoding.tolocal(name), value)
756 for (name, value) in tagtypes.iteritems()])
758 for (name, value) in tagtypes.iteritems()])
757 return (tags, tagtypes)
759 return (tags, tagtypes)
758
760
759 def tagtype(self, tagname):
761 def tagtype(self, tagname):
760 '''
762 '''
761 return the type of the given tag. result can be:
763 return the type of the given tag. result can be:
762
764
763 'local' : a local tag
765 'local' : a local tag
764 'global' : a global tag
766 'global' : a global tag
765 None : tag does not exist
767 None : tag does not exist
766 '''
768 '''
767
769
768 return self._tagscache.tagtypes.get(tagname)
770 return self._tagscache.tagtypes.get(tagname)
769
771
770 def tagslist(self):
772 def tagslist(self):
771 '''return a list of tags ordered by revision'''
773 '''return a list of tags ordered by revision'''
772 if not self._tagscache.tagslist:
774 if not self._tagscache.tagslist:
773 l = []
775 l = []
774 for t, n in self.tags().iteritems():
776 for t, n in self.tags().iteritems():
775 l.append((self.changelog.rev(n), t, n))
777 l.append((self.changelog.rev(n), t, n))
776 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
778 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
777
779
778 return self._tagscache.tagslist
780 return self._tagscache.tagslist
779
781
780 def nodetags(self, node):
782 def nodetags(self, node):
781 '''return the tags associated with a node'''
783 '''return the tags associated with a node'''
782 if not self._tagscache.nodetagscache:
784 if not self._tagscache.nodetagscache:
783 nodetagscache = {}
785 nodetagscache = {}
784 for t, n in self._tagscache.tags.iteritems():
786 for t, n in self._tagscache.tags.iteritems():
785 nodetagscache.setdefault(n, []).append(t)
787 nodetagscache.setdefault(n, []).append(t)
786 for tags in nodetagscache.itervalues():
788 for tags in nodetagscache.itervalues():
787 tags.sort()
789 tags.sort()
788 self._tagscache.nodetagscache = nodetagscache
790 self._tagscache.nodetagscache = nodetagscache
789 return self._tagscache.nodetagscache.get(node, [])
791 return self._tagscache.nodetagscache.get(node, [])
790
792
791 def nodebookmarks(self, node):
793 def nodebookmarks(self, node):
792 """return the list of bookmarks pointing to the specified node"""
794 """return the list of bookmarks pointing to the specified node"""
793 marks = []
795 marks = []
794 for bookmark, n in self._bookmarks.iteritems():
796 for bookmark, n in self._bookmarks.iteritems():
795 if n == node:
797 if n == node:
796 marks.append(bookmark)
798 marks.append(bookmark)
797 return sorted(marks)
799 return sorted(marks)
798
800
799 def branchmap(self):
801 def branchmap(self):
800 '''returns a dictionary {branch: [branchheads]} with branchheads
802 '''returns a dictionary {branch: [branchheads]} with branchheads
801 ordered by increasing revision number'''
803 ordered by increasing revision number'''
802 branchmap.updatecache(self)
804 branchmap.updatecache(self)
803 return self._branchcaches[self.filtername]
805 return self._branchcaches[self.filtername]
804
806
805 @unfilteredmethod
807 @unfilteredmethod
806 def revbranchcache(self):
808 def revbranchcache(self):
807 if not self._revbranchcache:
809 if not self._revbranchcache:
808 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
810 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
809 return self._revbranchcache
811 return self._revbranchcache
810
812
811 def branchtip(self, branch, ignoremissing=False):
813 def branchtip(self, branch, ignoremissing=False):
812 '''return the tip node for a given branch
814 '''return the tip node for a given branch
813
815
814 If ignoremissing is True, then this method will not raise an error.
816 If ignoremissing is True, then this method will not raise an error.
815 This is helpful for callers that only expect None for a missing branch
817 This is helpful for callers that only expect None for a missing branch
816 (e.g. namespace).
818 (e.g. namespace).
817
819
818 '''
820 '''
819 try:
821 try:
820 return self.branchmap().branchtip(branch)
822 return self.branchmap().branchtip(branch)
821 except KeyError:
823 except KeyError:
822 if not ignoremissing:
824 if not ignoremissing:
823 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
825 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
824 else:
826 else:
825 pass
827 pass
826
828
827 def lookup(self, key):
829 def lookup(self, key):
828 return self[key].node()
830 return self[key].node()
829
831
830 def lookupbranch(self, key, remote=None):
832 def lookupbranch(self, key, remote=None):
831 repo = remote or self
833 repo = remote or self
832 if key in repo.branchmap():
834 if key in repo.branchmap():
833 return key
835 return key
834
836
835 repo = (remote and remote.local()) and remote or self
837 repo = (remote and remote.local()) and remote or self
836 return repo[key].branch()
838 return repo[key].branch()
837
839
838 def known(self, nodes):
840 def known(self, nodes):
839 cl = self.changelog
841 cl = self.changelog
840 nm = cl.nodemap
842 nm = cl.nodemap
841 filtered = cl.filteredrevs
843 filtered = cl.filteredrevs
842 result = []
844 result = []
843 for n in nodes:
845 for n in nodes:
844 r = nm.get(n)
846 r = nm.get(n)
845 resp = not (r is None or r in filtered)
847 resp = not (r is None or r in filtered)
846 result.append(resp)
848 result.append(resp)
847 return result
849 return result
848
850
849 def local(self):
851 def local(self):
850 return self
852 return self
851
853
852 def publishing(self):
854 def publishing(self):
853 # it's safe (and desirable) to trust the publish flag unconditionally
855 # it's safe (and desirable) to trust the publish flag unconditionally
854 # so that we don't finalize changes shared between users via ssh or nfs
856 # so that we don't finalize changes shared between users via ssh or nfs
855 return self.ui.configbool('phases', 'publish', True, untrusted=True)
857 return self.ui.configbool('phases', 'publish', True, untrusted=True)
856
858
857 def cancopy(self):
859 def cancopy(self):
858 # so statichttprepo's override of local() works
860 # so statichttprepo's override of local() works
859 if not self.local():
861 if not self.local():
860 return False
862 return False
861 if not self.publishing():
863 if not self.publishing():
862 return True
864 return True
863 # if publishing we can't copy if there is filtered content
865 # if publishing we can't copy if there is filtered content
864 return not self.filtered('visible').changelog.filteredrevs
866 return not self.filtered('visible').changelog.filteredrevs
865
867
866 def shared(self):
868 def shared(self):
867 '''the type of shared repository (None if not shared)'''
869 '''the type of shared repository (None if not shared)'''
868 if self.sharedpath != self.path:
870 if self.sharedpath != self.path:
869 return 'store'
871 return 'store'
870 return None
872 return None
871
873
872 def join(self, f, *insidef):
874 def join(self, f, *insidef):
873 return self.vfs.join(os.path.join(f, *insidef))
875 return self.vfs.join(os.path.join(f, *insidef))
874
876
875 def wjoin(self, f, *insidef):
877 def wjoin(self, f, *insidef):
876 return self.vfs.reljoin(self.root, f, *insidef)
878 return self.vfs.reljoin(self.root, f, *insidef)
877
879
878 def file(self, f):
880 def file(self, f):
879 if f[0] == '/':
881 if f[0] == '/':
880 f = f[1:]
882 f = f[1:]
881 return filelog.filelog(self.svfs, f)
883 return filelog.filelog(self.svfs, f)
882
884
883 def changectx(self, changeid):
885 def changectx(self, changeid):
884 return self[changeid]
886 return self[changeid]
885
887
886 def setparents(self, p1, p2=nullid):
888 def setparents(self, p1, p2=nullid):
887 self.dirstate.beginparentchange()
889 self.dirstate.beginparentchange()
888 copies = self.dirstate.setparents(p1, p2)
890 copies = self.dirstate.setparents(p1, p2)
889 pctx = self[p1]
891 pctx = self[p1]
890 if copies:
892 if copies:
891 # Adjust copy records, the dirstate cannot do it, it
893 # Adjust copy records, the dirstate cannot do it, it
892 # requires access to parents manifests. Preserve them
894 # requires access to parents manifests. Preserve them
893 # only for entries added to first parent.
895 # only for entries added to first parent.
894 for f in copies:
896 for f in copies:
895 if f not in pctx and copies[f] in pctx:
897 if f not in pctx and copies[f] in pctx:
896 self.dirstate.copy(copies[f], f)
898 self.dirstate.copy(copies[f], f)
897 if p2 == nullid:
899 if p2 == nullid:
898 for f, s in sorted(self.dirstate.copies().items()):
900 for f, s in sorted(self.dirstate.copies().items()):
899 if f not in pctx and s not in pctx:
901 if f not in pctx and s not in pctx:
900 self.dirstate.copy(None, f)
902 self.dirstate.copy(None, f)
901 self.dirstate.endparentchange()
903 self.dirstate.endparentchange()
902
904
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

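    # Illustrative sketch of what _loadfilter consumes: entries in the
    # [encode]/[decode] hgrc sections map file patterns to filter commands.
    # The patterns and commands below are hypothetical examples; 'pipe:' is
    # used here on the assumption that it is one of the registered data
    # filter prefixes, and a '!' command disables a pattern, as handled
    # above.
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
    #
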
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

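    # Minimal usage sketch for the wread/wwrite pair above ('repo' is an
    # assumed localrepository instance; the file names are hypothetical):
    #
    #   data = repo.wread('foo.txt')            # encode-filtered read
    #   repo.wwrite('foo.txt', data, '')        # '' flags: regular file
    #   repo.wwrite('foo.sh', data, 'x')        # 'x' sets the exec bit
    #   repo.wwrite('foo.lnk', 'target', 'l')   # 'l' creates a symlink
    #
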
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

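    # Minimal transaction usage sketch ('repo' is an assumed localrepository
    # instance); this mirrors the lock/try/close/release pattern used by
    # commitctx() below:
    #
    #   lock = repo.lock()           # transactions require the store lock
    #   try:
    #       tr = repo.transaction('example')
    #       try:
    #           ...                  # write to the store
    #           tr.close()           # commit the transaction
    #       finally:
    #           tr.release()         # aborts if close() was never reached
    #   finally:
    #       lock.release()
    #
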
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

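    # Hedged sketch of the recovery entry points defined above ('repo' is an
    # assumed localrepository instance):
    #
    #   repo.rollback(dryrun=True)   # report what would be undone
    #   repo.rollback(force=True)    # skip the "may lose data" safety check
    #   repo.recover()               # roll back an interrupted transaction
    #
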
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

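    # Hedged sketch of _afterlock (an internal API; 'repo' is an assumed
    # localrepository instance). commit() below uses this mechanism to defer
    # its 'commit' hook until every lock is released:
    #
    #   def _notify():
    #       repo.ui.status('all repository locks released\n')
    #   repo._afterlock(_notify)   # runs immediately if nothing is locked
    #
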
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

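    # Lock-ordering sketch implied by the docstrings above: take wlock before
    # lock ('repo' is an assumed localrepository instance). Both locks are
    # re-entrant and support the context-manager form used by recover():
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           ...   # modify working copy and store
    #
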
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

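    # For a renamed file, _filecommit records the copy source in the filelog
    # metadata rather than as a real first parent; the dict built above ends
    # up roughly like this (the values shown are illustrative placeholders):
    #
    #   meta = {'copy': 'foo',                       # source path
    #           'copyrev': '<40-hex filelog node>'}  # revision copied from
    #
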
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # the temporary commit may have been stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

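    # Minimal commit() usage sketch ('repo' is an assumed localrepository
    # instance; commit() acquires its own wlock/lock as shown above):
    #
    #   node = repo.commit(text='example message',
    #                      user='alice <alice@example.com>')
    #   if node is None:
    #       pass   # nothing changed and empty commits were not allowed
    #
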
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

1761 @unfilteredmethod
1763 @unfilteredmethod
1762 def destroyed(self):
1764 def destroyed(self):
1763 '''Inform the repository that nodes have been destroyed.
1765 '''Inform the repository that nodes have been destroyed.
1764 Intended for use by strip and rollback, so there's a common
1766 Intended for use by strip and rollback, so there's a common
1765 place for anything that has to be done after destroying history.
1767 place for anything that has to be done after destroying history.
1766 '''
1768 '''
1767 # When one tries to:
1769 # When one tries to:
1768 # 1) destroy nodes thus calling this method (e.g. strip)
1770 # 1) destroy nodes thus calling this method (e.g. strip)
1769 # 2) use phasecache somewhere (e.g. commit)
1771 # 2) use phasecache somewhere (e.g. commit)
1770 #
1772 #
1771 # then 2) will fail because the phasecache contains nodes that were
1773 # then 2) will fail because the phasecache contains nodes that were
1772 # removed. We can either remove phasecache from the filecache,
1774 # removed. We can either remove phasecache from the filecache,
1773 # causing it to reload next time it is accessed, or simply filter
1775 # causing it to reload next time it is accessed, or simply filter
1774 # the removed nodes now and write the updated cache.
1776 # the removed nodes now and write the updated cache.
1775 self._phasecache.filterunknown(self)
1777 self._phasecache.filterunknown(self)
1776 self._phasecache.write()
1778 self._phasecache.write()
1777
1779
1778 # update the 'served' branch cache to help read only server process
1780 # update the 'served' branch cache to help read only server process
1779 # Thanks to branchcache collaboration this is done from the nearest
1781 # Thanks to branchcache collaboration this is done from the nearest
1780 # filtered subset and it is expected to be fast.
1782 # filtered subset and it is expected to be fast.
1781 branchmap.updatecache(self.filtered('served'))
1783 branchmap.updatecache(self.filtered('served'))
1782
1784
1783 # Ensure the persistent tag cache is updated. Doing it now
1785 # Ensure the persistent tag cache is updated. Doing it now
1784 # means that the tag cache only has to worry about destroyed
1786 # means that the tag cache only has to worry about destroyed
1785 # heads immediately after a strip/rollback. That in turn
1787 # heads immediately after a strip/rollback. That in turn
1786 # guarantees that "cachetip == currenttip" (comparing both rev
1788 # guarantees that "cachetip == currenttip" (comparing both rev
1787 # and node) always means no nodes have been added or destroyed.
1789 # and node) always means no nodes have been added or destroyed.
1788
1790
1789 # XXX this is suboptimal when qrefresh'ing: we strip the current
1791 # XXX this is suboptimal when qrefresh'ing: we strip the current
1790 # head, refresh the tag cache, then immediately add a new head.
1792 # head, refresh the tag cache, then immediately add a new head.
1791 # But I think doing it this way is necessary for the "instant
1793 # But I think doing it this way is necessary for the "instant
1792 # tag cache retrieval" case to work.
1794 # tag cache retrieval" case to work.
1793 self.invalidate()
1795 self.invalidate()
1794
1796
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

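    # Illustrative sketch (not part of the original changeset): between()
    # samples the chain from 'top' down to 'bottom' at exponentially growing
    # distances (1, 2, 4, 8, ...), which keeps wire-protocol discovery round
    # trips logarithmic in the length of the chain.  The same walk over
    # plain integers, assuming a linear chain top -> top-1 -> ... -> 0:
    #
    #   def sample(top, bottom):
    #       n, l, i, f = top, [], 0, 1
    #       while n != bottom:
    #           if i == f:
    #               l.append(n)
    #               f *= 2
    #           n -= 1
    #           i += 1
    #       return l
    #
    #   sample(10, 0)  ==>  [9, 8, 6, 2]
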
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose functions are called with a
        pushop (carrying repo, remote, and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

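    # Illustrative sketch (not part of the original changeset): a typical
    # caller goes through the generic pushkey dispatch, e.g. for bookmarks
    # ('mybook' and the hex strings are hypothetical):
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', oldhex, newhex)
    #
    # A 'prepushkey' hook that raises HookAbort turns into a False return
    # value here, while the post-'pushkey' hook is deferred via _afterlock
    # until the current lock is released.
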
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

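# Illustrative note (not part of the original changeset): aftertrans()
# returns a plain closure over copies of the (vfs, src, dest) tuples, so a
# transaction can hold the callback without keeping a reference cycle back
# to the repository.  A minimal sketch of its behavior:
#
#   a = aftertrans([(somevfs, 'journal', 'undo')])
#   a()  # renames journal -> undo, silently skipping a missing journal
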
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

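# Illustrative doctest-style examples (not part of the original changeset),
# assuming POSIX path separators:
#
#   >>> undoname('.hg/store/journal')
#   '.hg/store/undo'
#   >>> undoname('.hg/store/journal.bookmarks')
#   '.hg/store/undo.bookmarks'
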
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
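# Illustrative sketch (not part of the original changeset): an extension
# could wrap newreporequirements() as the docstring suggests; 'myext' and
# 'exp-mycap' are hypothetical names.
#
#   from mercurial import extensions, localrepo
#
#   def _reqs(orig, repo):
#       requirements = orig(repo)
#       if repo.ui.configbool('myext', 'mycap', False):
#           requirements.add('exp-mycap')
#       return requirements
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements', _reqs)
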
@@ -1,1399 +1,1402 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import contextlib
import errno
import glob
import hashlib
import os
import re
import shutil
import stat
import tempfile
import threading

from .i18n import _
from .node import wdirrev
from . import (
    encoding,
    error,
    match as matchmod,
    osutil,
    pathutil,
    phases,
    revset,
    similar,
    util,
)

if os.name == 'nt':
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)

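# Illustrative sketch (not part of the original changeset): the class above
# acts both as a plain 7-tuple and as named fields.
#
#   st = status(['a.txt'], [], [], [], [], [], ['b.txt'])
#   st.modified            ==> ['a.txt']
#   st[6] == st.clean      ==> True
#   modified, added, removed = st[:3]   # still unpacks like a tuple
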
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleaned
                # up.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

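# Illustrative usage sketch (not part of the original changeset): with
# abort=False the auditor warns once per colliding name; the file names are
# hypothetical.
#
#   audit = casecollisionauditor(ui, False, repo.dirstate)
#   audit('README')   # records 'readme' as seen
#   audit('ReadMe')   # warns: possible case-folding collision for ReadMe
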
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        key = s.digest()
    return key

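# Illustrative sketch (not part of the original changeset): the key above is
# just a SHA-1 over the sorted filtered revs rendered as '%s;'.  Recomputed
# standalone:
#
#   import hashlib
#   def samplekey(filteredrevs, maxrev):
#       revs = sorted(r for r in filteredrevs if r <= maxrev)
#       if not revs:
#           return None
#       s = hashlib.sha1()
#       for rev in revs:
#           s.update('%s;' % rev)
#       return s.digest()
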
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False, backgroundclose=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed,
                             backgroundclose=backgroundclose)

    def read(self, path):
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that paths stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

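    # Illustrative note (not part of the original changeset): the "ambiguous"
    # case above is a rename that leaves st_mtime and st_size identical to
    # the replaced file, so (size, mtime) cache validation could miss the
    # change.  The fix advances mtime by one second, truncated to 31 bits:
    #
    #   advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
    #   os.utime(dstpath, (advanced, advanced))
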
    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield a (dirpath, dirs, files) tuple for each directory under path

        ``dirpath`` is relative to the root of this vfs. This uses
        ``os.sep`` as the path separator, even if you specify a POSIX-style
        ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort('can only have 1 active background file closer')

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                vfs._backgroundfilecloser = None

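    # Illustrative usage sketch (not part of the original changeset);
    # 'manyfiles' is hypothetical:
    #
    #   with vfs.backgroundclosing(ui, expectedcount=4096):
    #       for name, data in manyfiles:
    #           vfs.write(name, data, backgroundclose=True)
    #
    # Each write hands its file object to the background closer instead of
    # blocking on close(); without the surrounding context manager the
    # backgroundclose flag aborts.
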
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        self.createmode = None
        self._trustnlink = None

    @property
    def mustaudit(self):
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files are not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictempfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort('backgroundclose can only be used when a '
                                  'backgroundclosing context manager is active')

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

opener = vfs

class auditvfs(object):
    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

filteropener = filtervfs

class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        if mode not in ('r', 'rb'):
            raise error.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag only controls whether we recurse into the working
    directories of the repositories that are found.'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

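# Usage sketch (the path is illustrative): enumerate repositories under a
# tree, following symlinks but not descending into found working directories:
#
#     for repopath in walkrepos('/srv/repos', followsym=True):
#         print(repopath)
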
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path

_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in the path: if it is a directory, use the files in it
    ending in .rc, else use the item itself.
    set HGRCPATH to the empty string to look only in .hg/hgrc of the current
    repo. if HGRCPATH is unset, use the default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath

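# For example (a sketch; paths are illustrative), a directory entry in
# HGRCPATH is expanded to its *.rc files while a plain file is used as-is:
#
#     HGRCPATH=/etc/mercurial/conf.d:/home/user/.hgrc
#     rcpath()  # -> ['/etc/mercurial/conf.d/foo.rc', ..., '/home/user/.hgrc']
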
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    if rev is None:
        return wdirrev
    return rev

def revsingle(repo, revspec, default='.'):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

def _pairspec(revspec):
    tree = revset.parse(revspec)
    tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

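# Usage sketch (specs are illustrative): resolve user-supplied revision
# arguments; revsingle returns a changectx, revpair returns binary nodes
# (the second is None when only one revision is named):
#
#     ctx = revsingle(repo, 'tip')
#     n1, n2 = revpair(repo, ['1.0::2.0'])
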
def revrange(repo, revs):
    """Return a set of revisions matching a list of revision specifications."""
    allspecs = []
    for spec in revs:
        if isinstance(spec, int):
            spec = revset.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)

def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        return []
    return parents

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)

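# Usage sketch (pattern and path are illustrative): build a matcher against
# the working context and test paths with it:
#
#     m = match(repo[None], pats=['glob:**.py'])
#     m('mercurial/scmutil.py')  # -> True
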
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    filepathfromroot = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)

    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath + ".orig"

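# For example, with the configuration below, a backup of 'dir/file' is
# written to '<repo>/.hg/origbackups/dir/file.orig' rather than next to the
# file as 'dir/file.orig':
#
#     [ui]
#     origbackuppath = .hg/origbackups
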
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dirstate states: '?' unknown, 'a' marked added, 'r' marked removed
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    various reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements

def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

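# The requires file is plain text with one feature name per line; a typical
# repository contains entries such as (names given for illustration):
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store
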
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell us
    if a file has been replaced. If it can't, we fall back to recreating the
    object on every call (essentially the same behavior as propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)

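# Minimal usage sketch (names are illustrative, not part of this module): the
# consuming class provides a _filecache dict and a join() target; the
# decorated method is then recomputed only when the tracked file changes:
#
#     class thing(object):
#         def __init__(self, path):
#             self._path = path
#             self._filecache = {}
#         def join(self, fname):
#             return os.path.join(self._path, fname)
#         @filecache('state')
#         def state(self):
#             return parsestate(self.join('state'))
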
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

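# Usage sketch (the command is illustrative): while holding the wlock, run a
# subprocess that may re-enter the lock via the HG_WLOCK_LOCKER variable:
#
#     with repo.wlock():
#         ret = wlocksub(repo, 'hg -R . status')
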
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta', False)
            or ui.configbool('format', 'usegeneraldelta', True))

def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)

class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)

class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort('can only call close() when context manager '
                              'active')

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
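
# Usage sketch (paths are illustrative; assumes the 'worker.backgroundclose'
# config is enabled on non-Windows platforms): hand file handles to close()
# and let worker threads close them; leaving the context manager waits for
# the queue to drain:
#
#     with backgroundfilecloser(ui, expectedcount=len(paths)) as closer:
#         for p in paths:
#             fh = open(p, 'rb')
#             data = fh.read()
#             closer.close(fh)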