localrepo: remove a couple of local type aliases...
Augie Fackler
r29104:b207653a default
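The diff below is small and mechanical: the module-level aliases propertycache = util.propertycache and filecache = scmutil.filecache are dropped, and repofilecache, unfilteredpropertycache, and filteredpropertycache inherit from scmutil.filecache and util.propertycache directly. A minimal sketch of the pattern being removed, using collections.OrderedDict as a stand-in for Mercurial's own helpers (the ConfigBefore/ConfigAfter names are illustrative, not part of this commit):

    import collections

    # Before: a module-level alias stands in for the real class. It works,
    # but the subclass line no longer says which module the base came from.
    OrderedDict = collections.OrderedDict

    class ConfigBefore(OrderedDict):
        pass

    # After: the base class names its defining module explicitly, so the
    # origin stays visible at the definition site.
    class ConfigAfter(collections.OrderedDict):
        pass

    # Both spellings produce the same class hierarchy.
    assert ConfigBefore.__bases__ == ConfigAfter.__bases__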
@@ -1,1977 +1,1975 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import inspect
 import os
 import random
 import time
 import weakref

 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
     wdirrev,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     cmdutil,
     context,
     dirstate,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     namespaces,
     obsolete,
     pathutil,
     peer,
     phases,
     pushkey,
     repoview,
     revset,
     scmutil,
     store,
     subrepo,
     tags as tagsmod,
     transaction,
     util,
 )

 release = lockmod.release
-propertycache = util.propertycache
 urlerr = util.urlerr
 urlreq = util.urlreq
-filecache = scmutil.filecache

-class repofilecache(filecache):
+class repofilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """

     def __get__(self, repo, type=None):
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())

 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

-class unfilteredpropertycache(propertycache):
+class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

-class filteredpropertycache(propertycache):
+class filteredpropertycache(util.propertycache):
91 """propertycache that must take filtering in account"""
89 """propertycache that must take filtering in account"""
92
90
93 def cachevalue(self, obj, value):
91 def cachevalue(self, obj, value):
94 object.__setattr__(obj, self.name, value)
92 object.__setattr__(obj, self.name, value)
95
93
96
94
97 def hasunfilteredcache(repo, name):
95 def hasunfilteredcache(repo, name):
98 """check if a repo has an unfilteredpropertycache value for <name>"""
96 """check if a repo has an unfilteredpropertycache value for <name>"""
99 return name in vars(repo.unfiltered())
97 return name in vars(repo.unfiltered())
100
98
101 def unfilteredmethod(orig):
99 def unfilteredmethod(orig):
102 """decorate method that always need to be run on unfiltered version"""
100 """decorate method that always need to be run on unfiltered version"""
103 def wrapper(repo, *args, **kwargs):
101 def wrapper(repo, *args, **kwargs):
104 return orig(repo.unfiltered(), *args, **kwargs)
102 return orig(repo.unfiltered(), *args, **kwargs)
105 return wrapper
103 return wrapper
106
104
107 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
105 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
108 'unbundle'))
106 'unbundle'))
109 legacycaps = moderncaps.union(set(['changegroupsubset']))
107 legacycaps = moderncaps.union(set(['changegroupsubset']))
110
108
111 class localpeer(peer.peerrepository):
109 class localpeer(peer.peerrepository):
112 '''peer for a local repo; reflects only the most recent API'''
110 '''peer for a local repo; reflects only the most recent API'''
113
111
114 def __init__(self, repo, caps=moderncaps):
112 def __init__(self, repo, caps=moderncaps):
115 peer.peerrepository.__init__(self)
113 peer.peerrepository.__init__(self)
116 self._repo = repo.filtered('served')
114 self._repo = repo.filtered('served')
117 self.ui = repo.ui
115 self.ui = repo.ui
118 self._caps = repo._restrictcapabilities(caps)
116 self._caps = repo._restrictcapabilities(caps)
119 self.requirements = repo.requirements
117 self.requirements = repo.requirements
120 self.supportedformats = repo.supportedformats
118 self.supportedformats = repo.supportedformats
121
119
122 def close(self):
120 def close(self):
123 self._repo.close()
121 self._repo.close()
124
122
125 def _capabilities(self):
123 def _capabilities(self):
126 return self._caps
124 return self._caps
127
125
128 def local(self):
126 def local(self):
129 return self._repo
127 return self._repo
130
128
131 def canpush(self):
129 def canpush(self):
132 return True
130 return True
133
131
134 def url(self):
132 def url(self):
135 return self._repo.url()
133 return self._repo.url()
136
134
137 def lookup(self, key):
135 def lookup(self, key):
138 return self._repo.lookup(key)
136 return self._repo.lookup(key)
139
137
140 def branchmap(self):
138 def branchmap(self):
141 return self._repo.branchmap()
139 return self._repo.branchmap()
142
140
143 def heads(self):
141 def heads(self):
144 return self._repo.heads()
142 return self._repo.heads()
145
143
146 def known(self, nodes):
144 def known(self, nodes):
147 return self._repo.known(nodes)
145 return self._repo.known(nodes)
148
146
149 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
147 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
150 **kwargs):
148 **kwargs):
151 cg = exchange.getbundle(self._repo, source, heads=heads,
149 cg = exchange.getbundle(self._repo, source, heads=heads,
152 common=common, bundlecaps=bundlecaps, **kwargs)
150 common=common, bundlecaps=bundlecaps, **kwargs)
153 if bundlecaps is not None and 'HG20' in bundlecaps:
151 if bundlecaps is not None and 'HG20' in bundlecaps:
154 # When requesting a bundle2, getbundle returns a stream to make the
152 # When requesting a bundle2, getbundle returns a stream to make the
155 # wire level function happier. We need to build a proper object
153 # wire level function happier. We need to build a proper object
156 # from it in local peer.
154 # from it in local peer.
157 cg = bundle2.getunbundler(self.ui, cg)
155 cg = bundle2.getunbundler(self.ui, cg)
158 return cg
156 return cg
159
157
160 # TODO We might want to move the next two calls into legacypeer and add
158 # TODO We might want to move the next two calls into legacypeer and add
161 # unbundle instead.
159 # unbundle instead.
162
160
163 def unbundle(self, cg, heads, url):
161 def unbundle(self, cg, heads, url):
164 """apply a bundle on a repo
162 """apply a bundle on a repo
165
163
166 This function handles the repo locking itself."""
164 This function handles the repo locking itself."""
167 try:
165 try:
168 try:
166 try:
169 cg = exchange.readbundle(self.ui, cg, None)
167 cg = exchange.readbundle(self.ui, cg, None)
170 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
168 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
171 if util.safehasattr(ret, 'getchunks'):
169 if util.safehasattr(ret, 'getchunks'):
172 # This is a bundle20 object, turn it into an unbundler.
170 # This is a bundle20 object, turn it into an unbundler.
173 # This little dance should be dropped eventually when the
171 # This little dance should be dropped eventually when the
174 # API is finally improved.
172 # API is finally improved.
175 stream = util.chunkbuffer(ret.getchunks())
173 stream = util.chunkbuffer(ret.getchunks())
176 ret = bundle2.getunbundler(self.ui, stream)
174 ret = bundle2.getunbundler(self.ui, stream)
177 return ret
175 return ret
178 except Exception as exc:
176 except Exception as exc:
179 # If the exception contains output salvaged from a bundle2
177 # If the exception contains output salvaged from a bundle2
180 # reply, we need to make sure it is printed before continuing
178 # reply, we need to make sure it is printed before continuing
181 # to fail. So we build a bundle2 with such output and consume
179 # to fail. So we build a bundle2 with such output and consume
182 # it directly.
180 # it directly.
183 #
181 #
184 # This is not very elegant but allows a "simple" solution for
182 # This is not very elegant but allows a "simple" solution for
185 # issue4594
183 # issue4594
186 output = getattr(exc, '_bundle2salvagedoutput', ())
184 output = getattr(exc, '_bundle2salvagedoutput', ())
187 if output:
185 if output:
188 bundler = bundle2.bundle20(self._repo.ui)
186 bundler = bundle2.bundle20(self._repo.ui)
189 for out in output:
187 for out in output:
190 bundler.addpart(out)
188 bundler.addpart(out)
191 stream = util.chunkbuffer(bundler.getchunks())
189 stream = util.chunkbuffer(bundler.getchunks())
192 b = bundle2.getunbundler(self.ui, stream)
190 b = bundle2.getunbundler(self.ui, stream)
193 bundle2.processbundle(self._repo, b)
191 bundle2.processbundle(self._repo, b)
194 raise
192 raise
195 except error.PushRaced as exc:
193 except error.PushRaced as exc:
196 raise error.ResponseError(_('push failed:'), str(exc))
194 raise error.ResponseError(_('push failed:'), str(exc))
197
195
     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return cg.apply(self._repo, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=legacycaps)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return changegroup.changegroup(self._repo, basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return changegroup.changegroupsubset(self._repo, bases, heads, source)

 class localrepository(object):

     supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                             'manifestv2'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                              'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
     filtername = None

     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()

     def __init__(self, baseui, path=None, create=False):
         self.requirements = set()
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                 realfs=False)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported

         if not self.vfs.isdir():
             if create:
                 self.requirements = newreporequirements(self)

                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)

                 if 'store' in self.requirements:
                     self.vfs.mkdir("store")

                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 self.requirements = scmutil.readrequires(
                     self.vfs, self.supported)
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise

         self.sharedpath = self.path
         try:
             vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                               realpath=True)
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(
             self.requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyopenerreqs()
         if create:
             self._writerequirements()

         self._dirstatevalidatewarned = False

         self._branchcaches = {}
         self._revbranchcache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}

         # generic mapping between names and nodes
         self.names = namespaces.namespaces()

     def close(self):
         self._writecaches()

     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()

     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise', True):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps

     def _applyopenerreqs(self):
         self.svfs.options = dict((r, 1) for r in self.requirements
                                  if r in self.openerreqs)
         # experimental config: format.chunkcachesize
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         # experimental config: format.maxchainlen
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
         # experimental config: format.manifestcachesize
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
         # experimental config: format.aggressivemergedeltas
         aggressivemergedeltas = self.ui.configbool('format',
                                                    'aggressivemergedeltas', False)
         self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)

     def _checknested(self, path):
398 """Determine if path is a legal nested repository."""
396 """Determine if path is a legal nested repository."""
399 if not path.startswith(self.root):
397 if not path.startswith(self.root):
400 return False
398 return False
401 subpath = path[len(self.root) + 1:]
399 subpath = path[len(self.root) + 1:]
402 normsubpath = util.pconvert(subpath)
400 normsubpath = util.pconvert(subpath)
403
401
404 # XXX: Checking against the current working copy is wrong in
402 # XXX: Checking against the current working copy is wrong in
405 # the sense that it can reject things like
403 # the sense that it can reject things like
406 #
404 #
407 # $ hg cat -r 10 sub/x.txt
405 # $ hg cat -r 10 sub/x.txt
408 #
406 #
409 # if sub/ is no longer a subrepository in the working copy
407 # if sub/ is no longer a subrepository in the working copy
410 # parent revision.
408 # parent revision.
411 #
409 #
412 # However, it can of course also allow things that would have
410 # However, it can of course also allow things that would have
413 # been rejected before, such as the above cat command if sub/
411 # been rejected before, such as the above cat command if sub/
414 # is a subrepository now, but was a normal directory before.
412 # is a subrepository now, but was a normal directory before.
415 # The old path auditor would have rejected by mistake since it
413 # The old path auditor would have rejected by mistake since it
416 # panics when it sees sub/.hg/.
414 # panics when it sees sub/.hg/.
417 #
415 #
418 # All in all, checking against the working copy seems sensible
416 # All in all, checking against the working copy seems sensible
419 # since we want to prevent access to nested repositories on
417 # since we want to prevent access to nested repositories on
420 # the filesystem *now*.
418 # the filesystem *now*.
421 ctx = self[None]
419 ctx = self[None]
422 parts = util.splitpath(subpath)
420 parts = util.splitpath(subpath)
423 while parts:
421 while parts:
424 prefix = '/'.join(parts)
422 prefix = '/'.join(parts)
425 if prefix in ctx.substate:
423 if prefix in ctx.substate:
426 if prefix == normsubpath:
424 if prefix == normsubpath:
427 return True
425 return True
428 else:
426 else:
429 sub = ctx.sub(prefix)
427 sub = ctx.sub(prefix)
430 return sub.checknested(subpath[len(prefix) + 1:])
428 return sub.checknested(subpath[len(prefix) + 1:])
431 else:
429 else:
432 parts.pop()
430 parts.pop()
433 return False
431 return False
434
432
435 def peer(self):
433 def peer(self):
436 return localpeer(self) # not cached to avoid reference cycle
434 return localpeer(self) # not cached to avoid reference cycle
437
435
438 def unfiltered(self):
436 def unfiltered(self):
439 """Return unfiltered version of the repository
437 """Return unfiltered version of the repository
440
438
441 Intended to be overwritten by filtered repo."""
439 Intended to be overwritten by filtered repo."""
442 return self
440 return self
443
441
444 def filtered(self, name):
442 def filtered(self, name):
445 """Return a filtered version of a repository"""
443 """Return a filtered version of a repository"""
446 # build a new class with the mixin and the current class
444 # build a new class with the mixin and the current class
447 # (possibly subclass of the repo)
445 # (possibly subclass of the repo)
448 class proxycls(repoview.repoview, self.unfiltered().__class__):
446 class proxycls(repoview.repoview, self.unfiltered().__class__):
449 pass
447 pass
450 return proxycls(self, name)
448 return proxycls(self, name)
451
449
452 @repofilecache('bookmarks', 'bookmarks.current')
450 @repofilecache('bookmarks', 'bookmarks.current')
453 def _bookmarks(self):
451 def _bookmarks(self):
454 return bookmarks.bmstore(self)
452 return bookmarks.bmstore(self)
455
453
456 @property
454 @property
457 def _activebookmark(self):
455 def _activebookmark(self):
458 return self._bookmarks.active
456 return self._bookmarks.active
459
457
460 def bookmarkheads(self, bookmark):
458 def bookmarkheads(self, bookmark):
461 name = bookmark.split('@', 1)[0]
459 name = bookmark.split('@', 1)[0]
462 heads = []
460 heads = []
463 for mark, n in self._bookmarks.iteritems():
461 for mark, n in self._bookmarks.iteritems():
464 if mark.split('@', 1)[0] == name:
462 if mark.split('@', 1)[0] == name:
465 heads.append(n)
463 heads.append(n)
466 return heads
464 return heads
467
465
468 # _phaserevs and _phasesets depend on changelog. what we need is to
466 # _phaserevs and _phasesets depend on changelog. what we need is to
469 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
467 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
470 # can't be easily expressed in filecache mechanism.
468 # can't be easily expressed in filecache mechanism.
471 @storecache('phaseroots', '00changelog.i')
469 @storecache('phaseroots', '00changelog.i')
472 def _phasecache(self):
470 def _phasecache(self):
473 return phases.phasecache(self, self._phasedefaults)
471 return phases.phasecache(self, self._phasedefaults)
474
472
475 @storecache('obsstore')
473 @storecache('obsstore')
476 def obsstore(self):
474 def obsstore(self):
477 # read default format for new obsstore.
475 # read default format for new obsstore.
478 # developer config: format.obsstore-version
476 # developer config: format.obsstore-version
479 defaultformat = self.ui.configint('format', 'obsstore-version', None)
477 defaultformat = self.ui.configint('format', 'obsstore-version', None)
480 # rely on obsstore class default when possible.
478 # rely on obsstore class default when possible.
481 kwargs = {}
479 kwargs = {}
482 if defaultformat is not None:
480 if defaultformat is not None:
483 kwargs['defaultformat'] = defaultformat
481 kwargs['defaultformat'] = defaultformat
484 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
482 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
485 store = obsolete.obsstore(self.svfs, readonly=readonly,
483 store = obsolete.obsstore(self.svfs, readonly=readonly,
486 **kwargs)
484 **kwargs)
487 if store and readonly:
485 if store and readonly:
488 self.ui.warn(
486 self.ui.warn(
489 _('obsolete feature not enabled but %i markers found!\n')
487 _('obsolete feature not enabled but %i markers found!\n')
490 % len(list(store)))
488 % len(list(store)))
491 return store
489 return store
492
490
493 @storecache('00changelog.i')
491 @storecache('00changelog.i')
494 def changelog(self):
492 def changelog(self):
495 c = changelog.changelog(self.svfs)
493 c = changelog.changelog(self.svfs)
496 if 'HG_PENDING' in os.environ:
494 if 'HG_PENDING' in os.environ:
497 p = os.environ['HG_PENDING']
495 p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c

     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.svfs)

     def dirlog(self, dir):
         return self.manifest.dirlog(dir)

     @repofilecache('dirstate')
     def dirstate(self):
         return dirstate.dirstate(self.vfs, self.ui, self.root,
                                  self._dirstatevalidate)

     def _dirstatevalidate(self, node):
         try:
             self.changelog.rev(node)
             return node
         except error.LookupError:
             if not self._dirstatevalidatewarned:
                 self._dirstatevalidatewarned = True
                 self.ui.warn(_("warning: ignoring unknown"
                                " working parent %s!\n") % short(node))
             return nullid

     def __getitem__(self, changeid):
         if changeid is None or changeid == wdirrev:
             return context.workingctx(self)
         if isinstance(changeid, slice):
             return [context.changectx(self, i)
                     for i in xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         return context.changectx(self, changeid)

     def __contains__(self, changeid):
         try:
             self[changeid]
             return True
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         return iter(self.changelog)

     def revs(self, expr, *args):
         '''Find revisions matching a revset.

         The revset is specified as a string ``expr`` that may contain
         %-formatting to escape certain types. See ``revset.formatspec``.

         Return a revset.abstractsmartset, which is a list-like interface
         that contains integer revisions.
         '''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self)

     def set(self, expr, *args):
         '''Find revisions matching a revset and emit changectx instances.

         This is a convenience wrapper around ``revs()`` that iterates the
         result and is a generator of changectx instances.
         '''
         for r in self.revs(expr, *args):
             yield self[r]

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         """Call a hook, passing this repo instance.

         This a convenience method to aid invoking hooks. Extensions likely
         won't call this unless they have registered a custom hook or are
         replacing code that is expected to call a hook.
         """
         return hook.hook(self.ui, self, name, throw, **args)

     @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra=None,
              editor=False):
         if isinstance(names, str):
             names = (names,)

         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 if munge:
                     m = munge(name)
                 else:
                     m = name

                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.vfs('localtags', 'r+')
             except IOError:
                 fp = self.vfs('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError as e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         fp.close()

         self.invalidatecaches()

         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])

         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m,
                               editor=editor)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date, editor=False):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         if not local:
             m = matchmod.exact(self.root, '', ['.hgtags'])
             if any(self.status(match=m, unknown=True, ignored=True)):
                 raise error.Abort(_('working copy of .hgtags is changed'),
                                   hint=_('please commit .hgtags manually'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date, editor=editor)

     @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''

         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None

                 self.nodetagscache = self.tagslist = None

         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()

         return cache

     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         if self.changelog.filteredrevs:
             tags, tt = self._findtags()
         else:
             tags = self._tagscache.tags
         for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t

     def _findtags(self):
         '''Do the hard work of finding tags.  Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''

         # XXX what tagtype should subclasses/extensions use?  Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type?  Should there
         # be one tagtype for all such "virtual" tags?  Or is the status
         # quo fine?

         alltags = {}                    # map tag name to (node, hist)
         tagtypes = {}

         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

         # Build the return dicts.  Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         return self._tagscache.tagtypes.get(tagname)

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 l.append((self.changelog.rev(n), t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

         return self._tagscache.tagslist

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])

     def nodebookmarks(self, node):
         """return the list of bookmarks pointing to the specified node"""
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)

     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]} with branchheads
         ordered by increasing revision number'''
         branchmap.updatecache(self)
         return self._branchcaches[self.filtername]

     @unfilteredmethod
     def revbranchcache(self):
         if not self._revbranchcache:
             self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
         return self._revbranchcache

     def branchtip(self, branch, ignoremissing=False):
         '''return the tip node for a given branch

         If ignoremissing is True, then this method will not raise an error.
         This is helpful for callers that only expect None for a missing branch
         (e.g. namespace).

         '''
         try:
             return self.branchmap().branchtip(branch)
         except KeyError:
             if not ignoremissing:
                 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
             else:
                 pass

     def lookup(self, key):
         return self[key].node()

     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key

         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()

     def known(self, nodes):
         cl = self.changelog
         nm = cl.nodemap
         filtered = cl.filteredrevs
         result = []
         for n in nodes:
             r = nm.get(n)
             resp = not (r is None or r in filtered)
             result.append(resp)
         return result

     def local(self):
         return self

     def publishing(self):
         # it's safe (and desirable) to trust the publish flag unconditionally
         # so that we don't finalize changes shared between users via ssh or nfs
         return self.ui.configbool('phases', 'publish', True, untrusted=True)

     def cancopy(self):
         # so statichttprepo's override of local() works
         if not self.local():
             return False
         if not self.publishing():
             return True
         # if publishing we can't copy if there is filtered content
         return not self.filtered('visible').changelog.filteredrevs

     def shared(self):
         '''the type of shared repository (None if not shared)'''
         if self.sharedpath != self.path:
             return 'store'
         return None

     def join(self, f, *insidef):
         return self.vfs.join(os.path.join(f, *insidef))

     def wjoin(self, f, *insidef):
         return self.vfs.reljoin(self.root, f, *insidef)

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.svfs, f)

     def changectx(self, changeid):
         return self[changeid]

     def setparents(self, p1, p2=nullid):
         self.dirstate.beginparentchange()
         copies = self.dirstate.setparents(p1, p2)
         pctx = self[p1]
         if copies:
             # Adjust copy records, the dirstate cannot do it, it
             # requires access to parents manifests. Preserve them
             # only for entries added to first parent.
             for f in copies:
                 if f not in pctx and copies[f] in pctx:
                     self.dirstate.copy(copies[f], f)
898 if p2 == nullid:
896 if p2 == nullid:
899 for f, s in sorted(self.dirstate.copies().items()):
897 for f, s in sorted(self.dirstate.copies().items()):
900 if f not in pctx and s not in pctx:
898 if f not in pctx and s not in pctx:
901 self.dirstate.copy(None, f)
899 self.dirstate.copy(None, f)
902 self.dirstate.endparentchange()
900 self.dirstate.endparentchange()
903
901

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
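
    # Editor's note: an illustrative sketch, not part of the original
    # source. _loadfilter() reads pattern/command pairs from hgrc
    # sections such as [encode] and [decode]; a command starting with a
    # name registered via adddatafilter() runs that Python filter, and
    # anything else is shelled out through util.filter (which understands
    # the documented 'pipe:' and 'tempfile:' command forms). A minimal
    # hgrc (the patterns and commands are assumptions):
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #   [decode]
    #   **.txt = pipe: dos2unix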

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
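
    # Editor's note: an illustrative sketch, not part of the original
    # source. ``flags`` above is the manifest flag string: 'l' writes
    # ``data`` as a symlink target, 'x' marks the written file
    # executable, and '' writes a plain file. For example (the paths and
    # contents are assumptions):
    #
    #   repo.wwrite('bin/run.sh', script_text, 'x')  # executable file
    #   repo.wwrite('current', 'releases/1.0', 'l')  # symlink to a path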

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This should be explicitly invoked here, because
                # in-memory changes aren't written out at transaction
                # close if tr.addfilegenerator (via dirstate.write or
                # so) wasn't invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if the transaction is aborted.
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
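
    # Editor's note: an illustrative sketch, not part of the original
    # source. Callers hold the store lock, open the transaction, and
    # close or release it explicitly; a nested call returns tr.nest() on
    # the same transaction. Roughly (the description string is an
    # assumption):
    #
    #   lock = repo.lock()
    #   tr = None
    #   try:
    #       tr = repo.transaction('my-operation')
    #       ...                   # mutate the store
    #       tr.close()            # commit the transaction
    #   finally:
    #       if tr:
    #           tr.release()      # rolls back unless already closed
    #       lock.release()
    #
    # commitctx() below follows exactly this shape.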

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
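
    # Editor's note: an illustrative sketch, not part of the original
    # source. recover() and rollback() back the `hg recover` and
    # `hg rollback` commands: the former rolls back an interrupted
    # "journal", the latter the "undo" files saved by the last
    # transaction. A typical session (the revision number is invented
    # for illustration):
    #
    #   $ hg rollback
    #   repository tip rolled back to revision 41 (undo commit)
    #   working directory now based on revision 41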

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
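
    # Editor's note: an illustrative sketch, not part of the original
    # source. When the lock is contended and ``wait`` is true, the
    # retry above honours the ui.timeout setting (600 seconds unless
    # configured otherwise), e.g. in hgrc:
    #
    #   [ui]
    #   timeout = 30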

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
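
    # Editor's note: an illustrative sketch, not part of the original
    # source. _afterlock() defers work until the outermost lock drops,
    # or runs it immediately when nothing is held; the txnclose hook
    # wiring in transaction() above uses exactly this shape (the
    # callback body is an assumption):
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)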

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
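
    # Editor's note: an illustrative sketch, not part of the original
    # source. Per the docstrings above, wlock must be taken before lock
    # to avoid the dead-lock hazard; release in reverse order:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()   # working directory first
    #       lock = repo.lock()     # then the store
    #       ...
    #   finally:
    #       release(lock, wlock)
    #
    # rollback() and commit() in this class follow the same pattern.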

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
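
    # Editor's note: an illustrative sketch, not part of the original
    # source. In the rename case above, the copy source travels in the
    # filelog revision's metadata rather than as a real parent,
    # conceptually (following the foo/bar diagram in the comment):
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #   flog.add(text, meta, tr, linkrev, nullid, newfparent)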

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
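
    # Editor's note: an illustrative sketch, not part of the original
    # source. A minimal programmatic commit through the method above
    # (the message and user are assumptions):
    #
    #   node = repo.commit(text='fix parser', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')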

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # put the new commit in the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

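    # Editor's note: an illustrative sketch (not part of this module) of the
    # calling convention strip-like operations are expected to follow; the
    # '_removerevs' helper named below is hypothetical.
    #
    #     with repo.lock():
    #         repo.destroying()        # flush in-memory state (e.g. phasecache)
    #         _removerevs(repo, revs)  # actually truncate the revlogs
    #         repo.destroyed()         # refresh phase, branch and tag caches
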
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

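    # An illustrative sketch (editor's addition): walking the working
    # directory with a matcher built from patterns; the glob below is an
    # arbitrary example.
    #
    #     m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #     for f in repo.walk(m):  # node=None walks the working directory
    #         repo.ui.write(f + '\n')
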
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

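    # An illustrative sketch (editor's addition): the returned status object
    # exposes the usual categories as list attributes.
    #
    #     st = repo.status(unknown=True)
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)
    #     for f in st.unknown:
    #         repo.ui.write('? %s\n' % f)
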
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

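    # An illustrative sketch (editor's addition): listing the open heads of
    # the 'default' branch as short hashes ('short' is imported from .node
    # at the top of this module).
    #
    #     for h in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % short(h))
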
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents until a merge or the root is reached,
            # recording (tipmost node, stop node, parent1, parent2)
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk the first-parent chain from top towards bottom, sampling
            # nodes at exponentially growing distances (1, 2, 4, 8, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

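    # An illustrative sketch (editor's addition): for a linear history this
    # samples the nodes 1, 2, 4, 8, ... first-parent steps below top, which
    # the legacy wire-protocol discovery uses to bisect the common subset.
    #
    #     samples = repo.between([(repo.changelog.tip(), nullid)])[0]
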
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (exposing repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

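    # An illustrative sketch (editor's addition) of how an extension might
    # register a hook here; 'myext' is a hypothetical name and the pushop
    # attributes follow the docstring above.
    #
    #     def prepush(pushop):
    #         n = len(pushop.outgoing.missing)
    #         pushop.repo.ui.note('pushing %d changesets\n' % n)
    #
    #     def reposetup(ui, repo):
    #         repo.prepushoutgoinghooks.add('myext', prepush)
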
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

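    # An illustrative sketch (editor's addition): the 'bookmarks' pushkey
    # namespace maps bookmark names to hex nodes; passing '' as the old
    # value creates a key. The 'stable' name is an arbitrary example.
    #
    #     marks = repo.listkeys('bookmarks')  # {name: hex node}
    #     repo.pushkey('bookmarks', 'stable', '', hex(repo.changelog.tip()))
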
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

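    # An illustrative sketch (editor's addition): callers typically stash the
    # message before an operation that may abort, then point the user at it.
    #
    #     msgfn = repo.savecommitmessage(ctx.description())
    #     # on failure, report something like: "commit message saved in %s"
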
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

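# An illustrative sketch (editor's addition): the transaction machinery uses
# this, roughly as below, to rename journal files to undo files once the
# transaction completes, without the closure keeping a reference to the repo.
#
#     renames = [(vfs, x, undoname(x)) for vfs, x in repo._journalfiles()]
#     after = aftertrans(renames)  # passed to transaction.transaction(...)
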
def undoname(fn):
    # e.g. '.hg/store/journal.phaseroots' -> '.hg/store/undo.phaseroots'
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
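
# An illustrative sketch (editor's addition) of the wrapping the docstring
# above invites; 'myext' and 'exp-myext' are hypothetical names.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         if repo.ui.configbool('myext', 'enabled'):
#             reqs.add('exp-myext')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)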