localrepo: refer to dirstateguard by its new name
Augie Fackler
r30492:77cd647b default
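Only the import hunk is visible in the truncated diff below; the call sites that this commit switches over from cmdutil.dirstateguard presumably sit in the later, cut-off part of the file. As a hedged sketch of what the rename means in practice (assuming the class keeps the name dirstateguard inside the new mercurial.dirstateguard module, as in Mercurial 4.1; mutate_dirstate_safely and the 'backup' label are illustrative placeholders, not code from this changeset):

# Hedged sketch, not part of this diff. The guard previously lived at
# cmdutil.dirstateguard; after the move it is reached via the new module.
from mercurial import dirstateguard

def mutate_dirstate_safely(repo):
    # 'backup' is a placeholder label used to name the dirstate backup files.
    guard = dirstateguard.dirstateguard(repo, 'backup')
    try:
        # ... modify repo.dirstate here ...
        guard.close()    # keep the changes
    finally:
        guard.release()  # restores the backup unless close() already ran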
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2000 +1,2001 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 cmdutil,
31 cmdutil,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 encoding,
35 encoding,
35 error,
36 error,
36 exchange,
37 exchange,
37 extensions,
38 extensions,
38 filelog,
39 filelog,
39 hook,
40 hook,
40 lock as lockmod,
41 lock as lockmod,
41 manifest,
42 manifest,
42 match as matchmod,
43 match as matchmod,
43 merge as mergemod,
44 merge as mergemod,
44 namespaces,
45 namespaces,
45 obsolete,
46 obsolete,
46 pathutil,
47 pathutil,
47 peer,
48 peer,
48 phases,
49 phases,
49 pushkey,
50 pushkey,
50 repoview,
51 repoview,
51 revset,
52 revset,
52 scmutil,
53 scmutil,
53 store,
54 store,
54 subrepo,
55 subrepo,
55 tags as tagsmod,
56 tags as tagsmod,
56 transaction,
57 transaction,
57 util,
58 util,
58 )
59 )
59
60
60 release = lockmod.release
61 release = lockmod.release
61 urlerr = util.urlerr
62 urlerr = util.urlerr
62 urlreq = util.urlreq
63 urlreq = util.urlreq
63
64
64 class repofilecache(scmutil.filecache):
65 class repofilecache(scmutil.filecache):
65 """All filecache usage on repo are done for logic that should be unfiltered
66 """All filecache usage on repo are done for logic that should be unfiltered
66 """
67 """
67
68
68 def __get__(self, repo, type=None):
69 def __get__(self, repo, type=None):
69 if repo is None:
70 if repo is None:
70 return self
71 return self
71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 def __set__(self, repo, value):
73 def __set__(self, repo, value):
73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 def __delete__(self, repo):
75 def __delete__(self, repo):
75 return super(repofilecache, self).__delete__(repo.unfiltered())
76 return super(repofilecache, self).__delete__(repo.unfiltered())
76
77
77 class storecache(repofilecache):
78 class storecache(repofilecache):
78 """filecache for files in the store"""
79 """filecache for files in the store"""
79 def join(self, obj, fname):
80 def join(self, obj, fname):
80 return obj.sjoin(fname)
81 return obj.sjoin(fname)
81
82
82 class unfilteredpropertycache(util.propertycache):
83 class unfilteredpropertycache(util.propertycache):
83 """propertycache that apply to unfiltered repo only"""
84 """propertycache that apply to unfiltered repo only"""
84
85
85 def __get__(self, repo, type=None):
86 def __get__(self, repo, type=None):
86 unfi = repo.unfiltered()
87 unfi = repo.unfiltered()
87 if unfi is repo:
88 if unfi is repo:
88 return super(unfilteredpropertycache, self).__get__(unfi)
89 return super(unfilteredpropertycache, self).__get__(unfi)
89 return getattr(unfi, self.name)
90 return getattr(unfi, self.name)
90
91
91 class filteredpropertycache(util.propertycache):
92 class filteredpropertycache(util.propertycache):
92 """propertycache that must take filtering in account"""
93 """propertycache that must take filtering in account"""
93
94
94 def cachevalue(self, obj, value):
95 def cachevalue(self, obj, value):
95 object.__setattr__(obj, self.name, value)
96 object.__setattr__(obj, self.name, value)
96
97
97
98
98 def hasunfilteredcache(repo, name):
99 def hasunfilteredcache(repo, name):
99 """check if a repo has an unfilteredpropertycache value for <name>"""
100 """check if a repo has an unfilteredpropertycache value for <name>"""
100 return name in vars(repo.unfiltered())
101 return name in vars(repo.unfiltered())
101
102
102 def unfilteredmethod(orig):
103 def unfilteredmethod(orig):
103 """decorate method that always need to be run on unfiltered version"""
104 """decorate method that always need to be run on unfiltered version"""
104 def wrapper(repo, *args, **kwargs):
105 def wrapper(repo, *args, **kwargs):
105 return orig(repo.unfiltered(), *args, **kwargs)
106 return orig(repo.unfiltered(), *args, **kwargs)
106 return wrapper
107 return wrapper
107
108
108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 'unbundle'))
110 'unbundle'))
110 legacycaps = moderncaps.union(set(['changegroupsubset']))
111 legacycaps = moderncaps.union(set(['changegroupsubset']))
111
112
112 class localpeer(peer.peerrepository):
113 class localpeer(peer.peerrepository):
113 '''peer for a local repo; reflects only the most recent API'''
114 '''peer for a local repo; reflects only the most recent API'''
114
115
115 def __init__(self, repo, caps=moderncaps):
116 def __init__(self, repo, caps=moderncaps):
116 peer.peerrepository.__init__(self)
117 peer.peerrepository.__init__(self)
117 self._repo = repo.filtered('served')
118 self._repo = repo.filtered('served')
118 self.ui = repo.ui
119 self.ui = repo.ui
119 self._caps = repo._restrictcapabilities(caps)
120 self._caps = repo._restrictcapabilities(caps)
120 self.requirements = repo.requirements
121 self.requirements = repo.requirements
121 self.supportedformats = repo.supportedformats
122 self.supportedformats = repo.supportedformats
122
123
123 def close(self):
124 def close(self):
124 self._repo.close()
125 self._repo.close()
125
126
126 def _capabilities(self):
127 def _capabilities(self):
127 return self._caps
128 return self._caps
128
129
129 def local(self):
130 def local(self):
130 return self._repo
131 return self._repo
131
132
132 def canpush(self):
133 def canpush(self):
133 return True
134 return True
134
135
135 def url(self):
136 def url(self):
136 return self._repo.url()
137 return self._repo.url()
137
138
138 def lookup(self, key):
139 def lookup(self, key):
139 return self._repo.lookup(key)
140 return self._repo.lookup(key)
140
141
141 def branchmap(self):
142 def branchmap(self):
142 return self._repo.branchmap()
143 return self._repo.branchmap()
143
144
144 def heads(self):
145 def heads(self):
145 return self._repo.heads()
146 return self._repo.heads()
146
147
147 def known(self, nodes):
148 def known(self, nodes):
148 return self._repo.known(nodes)
149 return self._repo.known(nodes)
149
150
150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 **kwargs):
152 **kwargs):
152 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
153 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
153 common=common, bundlecaps=bundlecaps,
154 common=common, bundlecaps=bundlecaps,
154 **kwargs)
155 **kwargs)
155 cb = util.chunkbuffer(chunks)
156 cb = util.chunkbuffer(chunks)
156
157
157 if bundlecaps is not None and 'HG20' in bundlecaps:
158 if bundlecaps is not None and 'HG20' in bundlecaps:
158 # When requesting a bundle2, getbundle returns a stream to make the
159 # When requesting a bundle2, getbundle returns a stream to make the
159 # wire level function happier. We need to build a proper object
160 # wire level function happier. We need to build a proper object
160 # from it in local peer.
161 # from it in local peer.
161 return bundle2.getunbundler(self.ui, cb)
162 return bundle2.getunbundler(self.ui, cb)
162 else:
163 else:
163 return changegroup.getunbundler('01', cb, None)
164 return changegroup.getunbundler('01', cb, None)
164
165
165 # TODO We might want to move the next two calls into legacypeer and add
166 # TODO We might want to move the next two calls into legacypeer and add
166 # unbundle instead.
167 # unbundle instead.
167
168
168 def unbundle(self, cg, heads, url):
169 def unbundle(self, cg, heads, url):
169 """apply a bundle on a repo
170 """apply a bundle on a repo
170
171
171 This function handles the repo locking itself."""
172 This function handles the repo locking itself."""
172 try:
173 try:
173 try:
174 try:
174 cg = exchange.readbundle(self.ui, cg, None)
175 cg = exchange.readbundle(self.ui, cg, None)
175 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
176 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
176 if util.safehasattr(ret, 'getchunks'):
177 if util.safehasattr(ret, 'getchunks'):
177 # This is a bundle20 object, turn it into an unbundler.
178 # This is a bundle20 object, turn it into an unbundler.
178 # This little dance should be dropped eventually when the
179 # This little dance should be dropped eventually when the
179 # API is finally improved.
180 # API is finally improved.
180 stream = util.chunkbuffer(ret.getchunks())
181 stream = util.chunkbuffer(ret.getchunks())
181 ret = bundle2.getunbundler(self.ui, stream)
182 ret = bundle2.getunbundler(self.ui, stream)
182 return ret
183 return ret
183 except Exception as exc:
184 except Exception as exc:
184 # If the exception contains output salvaged from a bundle2
185 # If the exception contains output salvaged from a bundle2
185 # reply, we need to make sure it is printed before continuing
186 # reply, we need to make sure it is printed before continuing
186 # to fail. So we build a bundle2 with such output and consume
187 # to fail. So we build a bundle2 with such output and consume
187 # it directly.
188 # it directly.
188 #
189 #
189 # This is not very elegant but allows a "simple" solution for
190 # This is not very elegant but allows a "simple" solution for
190 # issue4594
191 # issue4594
191 output = getattr(exc, '_bundle2salvagedoutput', ())
192 output = getattr(exc, '_bundle2salvagedoutput', ())
192 if output:
193 if output:
193 bundler = bundle2.bundle20(self._repo.ui)
194 bundler = bundle2.bundle20(self._repo.ui)
194 for out in output:
195 for out in output:
195 bundler.addpart(out)
196 bundler.addpart(out)
196 stream = util.chunkbuffer(bundler.getchunks())
197 stream = util.chunkbuffer(bundler.getchunks())
197 b = bundle2.getunbundler(self.ui, stream)
198 b = bundle2.getunbundler(self.ui, stream)
198 bundle2.processbundle(self._repo, b)
199 bundle2.processbundle(self._repo, b)
199 raise
200 raise
200 except error.PushRaced as exc:
201 except error.PushRaced as exc:
201 raise error.ResponseError(_('push failed:'), str(exc))
202 raise error.ResponseError(_('push failed:'), str(exc))
202
203
203 def lock(self):
204 def lock(self):
204 return self._repo.lock()
205 return self._repo.lock()
205
206
206 def addchangegroup(self, cg, source, url):
207 def addchangegroup(self, cg, source, url):
207 return cg.apply(self._repo, source, url)
208 return cg.apply(self._repo, source, url)
208
209
209 def pushkey(self, namespace, key, old, new):
210 def pushkey(self, namespace, key, old, new):
210 return self._repo.pushkey(namespace, key, old, new)
211 return self._repo.pushkey(namespace, key, old, new)
211
212
212 def listkeys(self, namespace):
213 def listkeys(self, namespace):
213 return self._repo.listkeys(namespace)
214 return self._repo.listkeys(namespace)
214
215
215 def debugwireargs(self, one, two, three=None, four=None, five=None):
216 def debugwireargs(self, one, two, three=None, four=None, five=None):
216 '''used to test argument passing over the wire'''
217 '''used to test argument passing over the wire'''
217 return "%s %s %s %s %s" % (one, two, three, four, five)
218 return "%s %s %s %s %s" % (one, two, three, four, five)
218
219
219 class locallegacypeer(localpeer):
220 class locallegacypeer(localpeer):
220 '''peer extension which implements legacy methods too; used for tests with
221 '''peer extension which implements legacy methods too; used for tests with
221 restricted capabilities'''
222 restricted capabilities'''
222
223
223 def __init__(self, repo):
224 def __init__(self, repo):
224 localpeer.__init__(self, repo, caps=legacycaps)
225 localpeer.__init__(self, repo, caps=legacycaps)
225
226
226 def branches(self, nodes):
227 def branches(self, nodes):
227 return self._repo.branches(nodes)
228 return self._repo.branches(nodes)
228
229
229 def between(self, pairs):
230 def between(self, pairs):
230 return self._repo.between(pairs)
231 return self._repo.between(pairs)
231
232
232 def changegroup(self, basenodes, source):
233 def changegroup(self, basenodes, source):
233 return changegroup.changegroup(self._repo, basenodes, source)
234 return changegroup.changegroup(self._repo, basenodes, source)
234
235
235 def changegroupsubset(self, bases, heads, source):
236 def changegroupsubset(self, bases, heads, source):
236 return changegroup.changegroupsubset(self._repo, bases, heads, source)
237 return changegroup.changegroupsubset(self._repo, bases, heads, source)
237
238
238 class localrepository(object):
239 class localrepository(object):
239
240
240 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
241 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
241 'manifestv2'))
242 'manifestv2'))
242 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
243 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
243 'dotencode'))
244 'dotencode'))
244 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
245 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
245 filtername = None
246 filtername = None
246
247
247 # a list of (ui, featureset) functions.
248 # a list of (ui, featureset) functions.
248 # only functions defined in module of enabled extensions are invoked
249 # only functions defined in module of enabled extensions are invoked
249 featuresetupfuncs = set()
250 featuresetupfuncs = set()
250
251
251 def __init__(self, baseui, path=None, create=False):
252 def __init__(self, baseui, path=None, create=False):
252 self.requirements = set()
253 self.requirements = set()
253 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
254 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
254 self.wopener = self.wvfs
255 self.wopener = self.wvfs
255 self.root = self.wvfs.base
256 self.root = self.wvfs.base
256 self.path = self.wvfs.join(".hg")
257 self.path = self.wvfs.join(".hg")
257 self.origroot = path
258 self.origroot = path
258 self.auditor = pathutil.pathauditor(self.root, self._checknested)
259 self.auditor = pathutil.pathauditor(self.root, self._checknested)
259 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
260 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
260 realfs=False)
261 realfs=False)
261 self.vfs = scmutil.vfs(self.path)
262 self.vfs = scmutil.vfs(self.path)
262 self.opener = self.vfs
263 self.opener = self.vfs
263 self.baseui = baseui
264 self.baseui = baseui
264 self.ui = baseui.copy()
265 self.ui = baseui.copy()
265 self.ui.copy = baseui.copy # prevent copying repo configuration
266 self.ui.copy = baseui.copy # prevent copying repo configuration
266 # A list of callback to shape the phase if no data were found.
267 # A list of callback to shape the phase if no data were found.
267 # Callback are in the form: func(repo, roots) --> processed root.
268 # Callback are in the form: func(repo, roots) --> processed root.
268 # This list it to be filled by extension during repo setup
269 # This list it to be filled by extension during repo setup
269 self._phasedefaults = []
270 self._phasedefaults = []
270 try:
271 try:
271 self.ui.readconfig(self.join("hgrc"), self.root)
272 self.ui.readconfig(self.join("hgrc"), self.root)
272 extensions.loadall(self.ui)
273 extensions.loadall(self.ui)
273 except IOError:
274 except IOError:
274 pass
275 pass
275
276
276 if self.featuresetupfuncs:
277 if self.featuresetupfuncs:
277 self.supported = set(self._basesupported) # use private copy
278 self.supported = set(self._basesupported) # use private copy
278 extmods = set(m.__name__ for n, m
279 extmods = set(m.__name__ for n, m
279 in extensions.extensions(self.ui))
280 in extensions.extensions(self.ui))
280 for setupfunc in self.featuresetupfuncs:
281 for setupfunc in self.featuresetupfuncs:
281 if setupfunc.__module__ in extmods:
282 if setupfunc.__module__ in extmods:
282 setupfunc(self.ui, self.supported)
283 setupfunc(self.ui, self.supported)
283 else:
284 else:
284 self.supported = self._basesupported
285 self.supported = self._basesupported
285
286
286 if not self.vfs.isdir():
287 if not self.vfs.isdir():
287 if create:
288 if create:
288 self.requirements = newreporequirements(self)
289 self.requirements = newreporequirements(self)
289
290
290 if not self.wvfs.exists():
291 if not self.wvfs.exists():
291 self.wvfs.makedirs()
292 self.wvfs.makedirs()
292 self.vfs.makedir(notindexed=True)
293 self.vfs.makedir(notindexed=True)
293
294
294 if 'store' in self.requirements:
295 if 'store' in self.requirements:
295 self.vfs.mkdir("store")
296 self.vfs.mkdir("store")
296
297
297 # create an invalid changelog
298 # create an invalid changelog
298 self.vfs.append(
299 self.vfs.append(
299 "00changelog.i",
300 "00changelog.i",
300 '\0\0\0\2' # represents revlogv2
301 '\0\0\0\2' # represents revlogv2
301 ' dummy changelog to prevent using the old repo layout'
302 ' dummy changelog to prevent using the old repo layout'
302 )
303 )
303 else:
304 else:
304 raise error.RepoError(_("repository %s not found") % path)
305 raise error.RepoError(_("repository %s not found") % path)
305 elif create:
306 elif create:
306 raise error.RepoError(_("repository %s already exists") % path)
307 raise error.RepoError(_("repository %s already exists") % path)
307 else:
308 else:
308 try:
309 try:
309 self.requirements = scmutil.readrequires(
310 self.requirements = scmutil.readrequires(
310 self.vfs, self.supported)
311 self.vfs, self.supported)
311 except IOError as inst:
312 except IOError as inst:
312 if inst.errno != errno.ENOENT:
313 if inst.errno != errno.ENOENT:
313 raise
314 raise
314
315
315 self.sharedpath = self.path
316 self.sharedpath = self.path
316 try:
317 try:
317 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
318 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
318 realpath=True)
319 realpath=True)
319 s = vfs.base
320 s = vfs.base
320 if not vfs.exists():
321 if not vfs.exists():
321 raise error.RepoError(
322 raise error.RepoError(
322 _('.hg/sharedpath points to nonexistent directory %s') % s)
323 _('.hg/sharedpath points to nonexistent directory %s') % s)
323 self.sharedpath = s
324 self.sharedpath = s
324 except IOError as inst:
325 except IOError as inst:
325 if inst.errno != errno.ENOENT:
326 if inst.errno != errno.ENOENT:
326 raise
327 raise
327
328
328 self.store = store.store(
329 self.store = store.store(
329 self.requirements, self.sharedpath, scmutil.vfs)
330 self.requirements, self.sharedpath, scmutil.vfs)
330 self.spath = self.store.path
331 self.spath = self.store.path
331 self.svfs = self.store.vfs
332 self.svfs = self.store.vfs
332 self.sjoin = self.store.join
333 self.sjoin = self.store.join
333 self.vfs.createmode = self.store.createmode
334 self.vfs.createmode = self.store.createmode
334 self._applyopenerreqs()
335 self._applyopenerreqs()
335 if create:
336 if create:
336 self._writerequirements()
337 self._writerequirements()
337
338
338 self._dirstatevalidatewarned = False
339 self._dirstatevalidatewarned = False
339
340
340 self._branchcaches = {}
341 self._branchcaches = {}
341 self._revbranchcache = None
342 self._revbranchcache = None
342 self.filterpats = {}
343 self.filterpats = {}
343 self._datafilters = {}
344 self._datafilters = {}
344 self._transref = self._lockref = self._wlockref = None
345 self._transref = self._lockref = self._wlockref = None
345
346
346 # A cache for various files under .hg/ that tracks file changes,
347 # A cache for various files under .hg/ that tracks file changes,
347 # (used by the filecache decorator)
348 # (used by the filecache decorator)
348 #
349 #
349 # Maps a property name to its util.filecacheentry
350 # Maps a property name to its util.filecacheentry
350 self._filecache = {}
351 self._filecache = {}
351
352
352 # hold sets of revision to be filtered
353 # hold sets of revision to be filtered
353 # should be cleared when something might have changed the filter value:
354 # should be cleared when something might have changed the filter value:
354 # - new changesets,
355 # - new changesets,
355 # - phase change,
356 # - phase change,
356 # - new obsolescence marker,
357 # - new obsolescence marker,
357 # - working directory parent change,
358 # - working directory parent change,
358 # - bookmark changes
359 # - bookmark changes
359 self.filteredrevcache = {}
360 self.filteredrevcache = {}
360
361
361 # generic mapping between names and nodes
362 # generic mapping between names and nodes
362 self.names = namespaces.namespaces()
363 self.names = namespaces.namespaces()
363
364
364 def close(self):
365 def close(self):
365 self._writecaches()
366 self._writecaches()
366
367
367 def _writecaches(self):
368 def _writecaches(self):
368 if self._revbranchcache:
369 if self._revbranchcache:
369 self._revbranchcache.write()
370 self._revbranchcache.write()
370
371
371 def _restrictcapabilities(self, caps):
372 def _restrictcapabilities(self, caps):
372 if self.ui.configbool('experimental', 'bundle2-advertise', True):
373 if self.ui.configbool('experimental', 'bundle2-advertise', True):
373 caps = set(caps)
374 caps = set(caps)
374 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
375 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
375 caps.add('bundle2=' + urlreq.quote(capsblob))
376 caps.add('bundle2=' + urlreq.quote(capsblob))
376 return caps
377 return caps
377
378
378 def _applyopenerreqs(self):
379 def _applyopenerreqs(self):
379 self.svfs.options = dict((r, 1) for r in self.requirements
380 self.svfs.options = dict((r, 1) for r in self.requirements
380 if r in self.openerreqs)
381 if r in self.openerreqs)
381 # experimental config: format.chunkcachesize
382 # experimental config: format.chunkcachesize
382 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
383 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
383 if chunkcachesize is not None:
384 if chunkcachesize is not None:
384 self.svfs.options['chunkcachesize'] = chunkcachesize
385 self.svfs.options['chunkcachesize'] = chunkcachesize
385 # experimental config: format.maxchainlen
386 # experimental config: format.maxchainlen
386 maxchainlen = self.ui.configint('format', 'maxchainlen')
387 maxchainlen = self.ui.configint('format', 'maxchainlen')
387 if maxchainlen is not None:
388 if maxchainlen is not None:
388 self.svfs.options['maxchainlen'] = maxchainlen
389 self.svfs.options['maxchainlen'] = maxchainlen
389 # experimental config: format.manifestcachesize
390 # experimental config: format.manifestcachesize
390 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
391 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
391 if manifestcachesize is not None:
392 if manifestcachesize is not None:
392 self.svfs.options['manifestcachesize'] = manifestcachesize
393 self.svfs.options['manifestcachesize'] = manifestcachesize
393 # experimental config: format.aggressivemergedeltas
394 # experimental config: format.aggressivemergedeltas
394 aggressivemergedeltas = self.ui.configbool('format',
395 aggressivemergedeltas = self.ui.configbool('format',
395 'aggressivemergedeltas', False)
396 'aggressivemergedeltas', False)
396 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
397 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
397 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398
399
399 def _writerequirements(self):
400 def _writerequirements(self):
400 scmutil.writerequires(self.vfs, self.requirements)
401 scmutil.writerequires(self.vfs, self.requirements)
401
402
402 def _checknested(self, path):
403 def _checknested(self, path):
403 """Determine if path is a legal nested repository."""
404 """Determine if path is a legal nested repository."""
404 if not path.startswith(self.root):
405 if not path.startswith(self.root):
405 return False
406 return False
406 subpath = path[len(self.root) + 1:]
407 subpath = path[len(self.root) + 1:]
407 normsubpath = util.pconvert(subpath)
408 normsubpath = util.pconvert(subpath)
408
409
409 # XXX: Checking against the current working copy is wrong in
410 # XXX: Checking against the current working copy is wrong in
410 # the sense that it can reject things like
411 # the sense that it can reject things like
411 #
412 #
412 # $ hg cat -r 10 sub/x.txt
413 # $ hg cat -r 10 sub/x.txt
413 #
414 #
414 # if sub/ is no longer a subrepository in the working copy
415 # if sub/ is no longer a subrepository in the working copy
415 # parent revision.
416 # parent revision.
416 #
417 #
417 # However, it can of course also allow things that would have
418 # However, it can of course also allow things that would have
418 # been rejected before, such as the above cat command if sub/
419 # been rejected before, such as the above cat command if sub/
419 # is a subrepository now, but was a normal directory before.
420 # is a subrepository now, but was a normal directory before.
420 # The old path auditor would have rejected by mistake since it
421 # The old path auditor would have rejected by mistake since it
421 # panics when it sees sub/.hg/.
422 # panics when it sees sub/.hg/.
422 #
423 #
423 # All in all, checking against the working copy seems sensible
424 # All in all, checking against the working copy seems sensible
424 # since we want to prevent access to nested repositories on
425 # since we want to prevent access to nested repositories on
425 # the filesystem *now*.
426 # the filesystem *now*.
426 ctx = self[None]
427 ctx = self[None]
427 parts = util.splitpath(subpath)
428 parts = util.splitpath(subpath)
428 while parts:
429 while parts:
429 prefix = '/'.join(parts)
430 prefix = '/'.join(parts)
430 if prefix in ctx.substate:
431 if prefix in ctx.substate:
431 if prefix == normsubpath:
432 if prefix == normsubpath:
432 return True
433 return True
433 else:
434 else:
434 sub = ctx.sub(prefix)
435 sub = ctx.sub(prefix)
435 return sub.checknested(subpath[len(prefix) + 1:])
436 return sub.checknested(subpath[len(prefix) + 1:])
436 else:
437 else:
437 parts.pop()
438 parts.pop()
438 return False
439 return False
439
440
440 def peer(self):
441 def peer(self):
441 return localpeer(self) # not cached to avoid reference cycle
442 return localpeer(self) # not cached to avoid reference cycle
442
443
443 def unfiltered(self):
444 def unfiltered(self):
444 """Return unfiltered version of the repository
445 """Return unfiltered version of the repository
445
446
446 Intended to be overwritten by filtered repo."""
447 Intended to be overwritten by filtered repo."""
447 return self
448 return self
448
449
449 def filtered(self, name):
450 def filtered(self, name):
450 """Return a filtered version of a repository"""
451 """Return a filtered version of a repository"""
451 # build a new class with the mixin and the current class
452 # build a new class with the mixin and the current class
452 # (possibly subclass of the repo)
453 # (possibly subclass of the repo)
453 class proxycls(repoview.repoview, self.unfiltered().__class__):
454 class proxycls(repoview.repoview, self.unfiltered().__class__):
454 pass
455 pass
455 return proxycls(self, name)
456 return proxycls(self, name)
456
457
457 @repofilecache('bookmarks', 'bookmarks.current')
458 @repofilecache('bookmarks', 'bookmarks.current')
458 def _bookmarks(self):
459 def _bookmarks(self):
459 return bookmarks.bmstore(self)
460 return bookmarks.bmstore(self)
460
461
461 @property
462 @property
462 def _activebookmark(self):
463 def _activebookmark(self):
463 return self._bookmarks.active
464 return self._bookmarks.active
464
465
465 def bookmarkheads(self, bookmark):
466 def bookmarkheads(self, bookmark):
466 name = bookmark.split('@', 1)[0]
467 name = bookmark.split('@', 1)[0]
467 heads = []
468 heads = []
468 for mark, n in self._bookmarks.iteritems():
469 for mark, n in self._bookmarks.iteritems():
469 if mark.split('@', 1)[0] == name:
470 if mark.split('@', 1)[0] == name:
470 heads.append(n)
471 heads.append(n)
471 return heads
472 return heads
472
473
473 # _phaserevs and _phasesets depend on changelog. what we need is to
474 # _phaserevs and _phasesets depend on changelog. what we need is to
474 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
475 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
475 # can't be easily expressed in filecache mechanism.
476 # can't be easily expressed in filecache mechanism.
476 @storecache('phaseroots', '00changelog.i')
477 @storecache('phaseroots', '00changelog.i')
477 def _phasecache(self):
478 def _phasecache(self):
478 return phases.phasecache(self, self._phasedefaults)
479 return phases.phasecache(self, self._phasedefaults)
479
480
480 @storecache('obsstore')
481 @storecache('obsstore')
481 def obsstore(self):
482 def obsstore(self):
482 # read default format for new obsstore.
483 # read default format for new obsstore.
483 # developer config: format.obsstore-version
484 # developer config: format.obsstore-version
484 defaultformat = self.ui.configint('format', 'obsstore-version', None)
485 defaultformat = self.ui.configint('format', 'obsstore-version', None)
485 # rely on obsstore class default when possible.
486 # rely on obsstore class default when possible.
486 kwargs = {}
487 kwargs = {}
487 if defaultformat is not None:
488 if defaultformat is not None:
488 kwargs['defaultformat'] = defaultformat
489 kwargs['defaultformat'] = defaultformat
489 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
490 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
490 store = obsolete.obsstore(self.svfs, readonly=readonly,
491 store = obsolete.obsstore(self.svfs, readonly=readonly,
491 **kwargs)
492 **kwargs)
492 if store and readonly:
493 if store and readonly:
493 self.ui.warn(
494 self.ui.warn(
494 _('obsolete feature not enabled but %i markers found!\n')
495 _('obsolete feature not enabled but %i markers found!\n')
495 % len(list(store)))
496 % len(list(store)))
496 return store
497 return store
497
498
498 @storecache('00changelog.i')
499 @storecache('00changelog.i')
499 def changelog(self):
500 def changelog(self):
500 c = changelog.changelog(self.svfs)
501 c = changelog.changelog(self.svfs)
501 if 'HG_PENDING' in os.environ:
502 if 'HG_PENDING' in os.environ:
502 p = os.environ['HG_PENDING']
503 p = os.environ['HG_PENDING']
503 if p.startswith(self.root):
504 if p.startswith(self.root):
504 c.readpending('00changelog.i.a')
505 c.readpending('00changelog.i.a')
505 return c
506 return c
506
507
507 def _constructmanifest(self):
508 def _constructmanifest(self):
508 # This is a temporary function while we migrate from manifest to
509 # This is a temporary function while we migrate from manifest to
509 # manifestlog. It allows bundlerepo and unionrepo to intercept the
510 # manifestlog. It allows bundlerepo and unionrepo to intercept the
510 # manifest creation.
511 # manifest creation.
511 return manifest.manifestrevlog(self.svfs)
512 return manifest.manifestrevlog(self.svfs)
512
513
513 @storecache('00manifest.i')
514 @storecache('00manifest.i')
514 def manifestlog(self):
515 def manifestlog(self):
515 return manifest.manifestlog(self.svfs, self)
516 return manifest.manifestlog(self.svfs, self)
516
517
517 @repofilecache('dirstate')
518 @repofilecache('dirstate')
518 def dirstate(self):
519 def dirstate(self):
519 return dirstate.dirstate(self.vfs, self.ui, self.root,
520 return dirstate.dirstate(self.vfs, self.ui, self.root,
520 self._dirstatevalidate)
521 self._dirstatevalidate)
521
522
522 def _dirstatevalidate(self, node):
523 def _dirstatevalidate(self, node):
523 try:
524 try:
524 self.changelog.rev(node)
525 self.changelog.rev(node)
525 return node
526 return node
526 except error.LookupError:
527 except error.LookupError:
527 if not self._dirstatevalidatewarned:
528 if not self._dirstatevalidatewarned:
528 self._dirstatevalidatewarned = True
529 self._dirstatevalidatewarned = True
529 self.ui.warn(_("warning: ignoring unknown"
530 self.ui.warn(_("warning: ignoring unknown"
530 " working parent %s!\n") % short(node))
531 " working parent %s!\n") % short(node))
531 return nullid
532 return nullid
532
533
533 def __getitem__(self, changeid):
534 def __getitem__(self, changeid):
534 if changeid is None or changeid == wdirrev:
535 if changeid is None or changeid == wdirrev:
535 return context.workingctx(self)
536 return context.workingctx(self)
536 if isinstance(changeid, slice):
537 if isinstance(changeid, slice):
537 return [context.changectx(self, i)
538 return [context.changectx(self, i)
538 for i in xrange(*changeid.indices(len(self)))
539 for i in xrange(*changeid.indices(len(self)))
539 if i not in self.changelog.filteredrevs]
540 if i not in self.changelog.filteredrevs]
540 return context.changectx(self, changeid)
541 return context.changectx(self, changeid)
541
542
542 def __contains__(self, changeid):
543 def __contains__(self, changeid):
543 try:
544 try:
544 self[changeid]
545 self[changeid]
545 return True
546 return True
546 except error.RepoLookupError:
547 except error.RepoLookupError:
547 return False
548 return False
548
549
549 def __nonzero__(self):
550 def __nonzero__(self):
550 return True
551 return True
551
552
552 def __len__(self):
553 def __len__(self):
553 return len(self.changelog)
554 return len(self.changelog)
554
555
555 def __iter__(self):
556 def __iter__(self):
556 return iter(self.changelog)
557 return iter(self.changelog)
557
558
558 def revs(self, expr, *args):
559 def revs(self, expr, *args):
559 '''Find revisions matching a revset.
560 '''Find revisions matching a revset.
560
561
561 The revset is specified as a string ``expr`` that may contain
562 The revset is specified as a string ``expr`` that may contain
562 %-formatting to escape certain types. See ``revset.formatspec``.
563 %-formatting to escape certain types. See ``revset.formatspec``.
563
564
564 Revset aliases from the configuration are not expanded. To expand
565 Revset aliases from the configuration are not expanded. To expand
565 user aliases, consider calling ``scmutil.revrange()``.
566 user aliases, consider calling ``scmutil.revrange()``.
566
567
567 Returns a revset.abstractsmartset, which is a list-like interface
568 Returns a revset.abstractsmartset, which is a list-like interface
568 that contains integer revisions.
569 that contains integer revisions.
569 '''
570 '''
570 expr = revset.formatspec(expr, *args)
571 expr = revset.formatspec(expr, *args)
571 m = revset.match(None, expr)
572 m = revset.match(None, expr)
572 return m(self)
573 return m(self)
573
574
574 def set(self, expr, *args):
575 def set(self, expr, *args):
575 '''Find revisions matching a revset and emit changectx instances.
576 '''Find revisions matching a revset and emit changectx instances.
576
577
577 This is a convenience wrapper around ``revs()`` that iterates the
578 This is a convenience wrapper around ``revs()`` that iterates the
578 result and is a generator of changectx instances.
579 result and is a generator of changectx instances.
579
580
580 Revset aliases from the configuration are not expanded. To expand
581 Revset aliases from the configuration are not expanded. To expand
581 user aliases, consider calling ``scmutil.revrange()``.
582 user aliases, consider calling ``scmutil.revrange()``.
582 '''
583 '''
583 for r in self.revs(expr, *args):
584 for r in self.revs(expr, *args):
584 yield self[r]
585 yield self[r]
585
586
586 def url(self):
587 def url(self):
587 return 'file:' + self.root
588 return 'file:' + self.root
588
589
589 def hook(self, name, throw=False, **args):
590 def hook(self, name, throw=False, **args):
590 """Call a hook, passing this repo instance.
591 """Call a hook, passing this repo instance.
591
592
592 This a convenience method to aid invoking hooks. Extensions likely
593 This a convenience method to aid invoking hooks. Extensions likely
593 won't call this unless they have registered a custom hook or are
594 won't call this unless they have registered a custom hook or are
594 replacing code that is expected to call a hook.
595 replacing code that is expected to call a hook.
595 """
596 """
596 return hook.hook(self.ui, self, name, throw, **args)
597 return hook.hook(self.ui, self, name, throw, **args)
597
598
598 @unfilteredmethod
599 @unfilteredmethod
599 def _tag(self, names, node, message, local, user, date, extra=None,
600 def _tag(self, names, node, message, local, user, date, extra=None,
600 editor=False):
601 editor=False):
601 if isinstance(names, str):
602 if isinstance(names, str):
602 names = (names,)
603 names = (names,)
603
604
604 branches = self.branchmap()
605 branches = self.branchmap()
605 for name in names:
606 for name in names:
606 self.hook('pretag', throw=True, node=hex(node), tag=name,
607 self.hook('pretag', throw=True, node=hex(node), tag=name,
607 local=local)
608 local=local)
608 if name in branches:
609 if name in branches:
609 self.ui.warn(_("warning: tag %s conflicts with existing"
610 self.ui.warn(_("warning: tag %s conflicts with existing"
610 " branch name\n") % name)
611 " branch name\n") % name)
611
612
612 def writetags(fp, names, munge, prevtags):
613 def writetags(fp, names, munge, prevtags):
613 fp.seek(0, 2)
614 fp.seek(0, 2)
614 if prevtags and prevtags[-1] != '\n':
615 if prevtags and prevtags[-1] != '\n':
615 fp.write('\n')
616 fp.write('\n')
616 for name in names:
617 for name in names:
617 if munge:
618 if munge:
618 m = munge(name)
619 m = munge(name)
619 else:
620 else:
620 m = name
621 m = name
621
622
622 if (self._tagscache.tagtypes and
623 if (self._tagscache.tagtypes and
623 name in self._tagscache.tagtypes):
624 name in self._tagscache.tagtypes):
624 old = self.tags().get(name, nullid)
625 old = self.tags().get(name, nullid)
625 fp.write('%s %s\n' % (hex(old), m))
626 fp.write('%s %s\n' % (hex(old), m))
626 fp.write('%s %s\n' % (hex(node), m))
627 fp.write('%s %s\n' % (hex(node), m))
627 fp.close()
628 fp.close()
628
629
629 prevtags = ''
630 prevtags = ''
630 if local:
631 if local:
631 try:
632 try:
632 fp = self.vfs('localtags', 'r+')
633 fp = self.vfs('localtags', 'r+')
633 except IOError:
634 except IOError:
634 fp = self.vfs('localtags', 'a')
635 fp = self.vfs('localtags', 'a')
635 else:
636 else:
636 prevtags = fp.read()
637 prevtags = fp.read()
637
638
638 # local tags are stored in the current charset
639 # local tags are stored in the current charset
639 writetags(fp, names, None, prevtags)
640 writetags(fp, names, None, prevtags)
640 for name in names:
641 for name in names:
641 self.hook('tag', node=hex(node), tag=name, local=local)
642 self.hook('tag', node=hex(node), tag=name, local=local)
642 return
643 return
643
644
644 try:
645 try:
645 fp = self.wfile('.hgtags', 'rb+')
646 fp = self.wfile('.hgtags', 'rb+')
646 except IOError as e:
647 except IOError as e:
647 if e.errno != errno.ENOENT:
648 if e.errno != errno.ENOENT:
648 raise
649 raise
649 fp = self.wfile('.hgtags', 'ab')
650 fp = self.wfile('.hgtags', 'ab')
650 else:
651 else:
651 prevtags = fp.read()
652 prevtags = fp.read()
652
653
653 # committed tags are stored in UTF-8
654 # committed tags are stored in UTF-8
654 writetags(fp, names, encoding.fromlocal, prevtags)
655 writetags(fp, names, encoding.fromlocal, prevtags)
655
656
656 fp.close()
657 fp.close()
657
658
658 self.invalidatecaches()
659 self.invalidatecaches()
659
660
660 if '.hgtags' not in self.dirstate:
661 if '.hgtags' not in self.dirstate:
661 self[None].add(['.hgtags'])
662 self[None].add(['.hgtags'])
662
663
663 m = matchmod.exact(self.root, '', ['.hgtags'])
664 m = matchmod.exact(self.root, '', ['.hgtags'])
664 tagnode = self.commit(message, user, date, extra=extra, match=m,
665 tagnode = self.commit(message, user, date, extra=extra, match=m,
665 editor=editor)
666 editor=editor)
666
667
667 for name in names:
668 for name in names:
668 self.hook('tag', node=hex(node), tag=name, local=local)
669 self.hook('tag', node=hex(node), tag=name, local=local)
669
670
670 return tagnode
671 return tagnode
671
672
672 def tag(self, names, node, message, local, user, date, editor=False):
673 def tag(self, names, node, message, local, user, date, editor=False):
673 '''tag a revision with one or more symbolic names.
674 '''tag a revision with one or more symbolic names.
674
675
675 names is a list of strings or, when adding a single tag, names may be a
676 names is a list of strings or, when adding a single tag, names may be a
676 string.
677 string.
677
678
678 if local is True, the tags are stored in a per-repository file.
679 if local is True, the tags are stored in a per-repository file.
679 otherwise, they are stored in the .hgtags file, and a new
680 otherwise, they are stored in the .hgtags file, and a new
680 changeset is committed with the change.
681 changeset is committed with the change.
681
682
682 keyword arguments:
683 keyword arguments:
683
684
684 local: whether to store tags in non-version-controlled file
685 local: whether to store tags in non-version-controlled file
685 (default False)
686 (default False)
686
687
687 message: commit message to use if committing
688 message: commit message to use if committing
688
689
689 user: name of user to use if committing
690 user: name of user to use if committing
690
691
691 date: date tuple to use if committing'''
692 date: date tuple to use if committing'''
692
693
693 if not local:
694 if not local:
694 m = matchmod.exact(self.root, '', ['.hgtags'])
695 m = matchmod.exact(self.root, '', ['.hgtags'])
695 if any(self.status(match=m, unknown=True, ignored=True)):
696 if any(self.status(match=m, unknown=True, ignored=True)):
696 raise error.Abort(_('working copy of .hgtags is changed'),
697 raise error.Abort(_('working copy of .hgtags is changed'),
697 hint=_('please commit .hgtags manually'))
698 hint=_('please commit .hgtags manually'))
698
699
699 self.tags() # instantiate the cache
700 self.tags() # instantiate the cache
700 self._tag(names, node, message, local, user, date, editor=editor)
701 self._tag(names, node, message, local, user, date, editor=editor)
701
702
702 @filteredpropertycache
703 @filteredpropertycache
703 def _tagscache(self):
704 def _tagscache(self):
704 '''Returns a tagscache object that contains various tags related
705 '''Returns a tagscache object that contains various tags related
705 caches.'''
706 caches.'''
706
707
707 # This simplifies its cache management by having one decorated
708 # This simplifies its cache management by having one decorated
708 # function (this one) and the rest simply fetch things from it.
709 # function (this one) and the rest simply fetch things from it.
709 class tagscache(object):
710 class tagscache(object):
710 def __init__(self):
711 def __init__(self):
711 # These two define the set of tags for this repository. tags
712 # These two define the set of tags for this repository. tags
712 # maps tag name to node; tagtypes maps tag name to 'global' or
713 # maps tag name to node; tagtypes maps tag name to 'global' or
713 # 'local'. (Global tags are defined by .hgtags across all
714 # 'local'. (Global tags are defined by .hgtags across all
714 # heads, and local tags are defined in .hg/localtags.)
715 # heads, and local tags are defined in .hg/localtags.)
715 # They constitute the in-memory cache of tags.
716 # They constitute the in-memory cache of tags.
716 self.tags = self.tagtypes = None
717 self.tags = self.tagtypes = None
717
718
718 self.nodetagscache = self.tagslist = None
719 self.nodetagscache = self.tagslist = None
719
720
720 cache = tagscache()
721 cache = tagscache()
721 cache.tags, cache.tagtypes = self._findtags()
722 cache.tags, cache.tagtypes = self._findtags()
722
723
723 return cache
724 return cache
724
725
725 def tags(self):
726 def tags(self):
726 '''return a mapping of tag to node'''
727 '''return a mapping of tag to node'''
727 t = {}
728 t = {}
728 if self.changelog.filteredrevs:
729 if self.changelog.filteredrevs:
729 tags, tt = self._findtags()
730 tags, tt = self._findtags()
730 else:
731 else:
731 tags = self._tagscache.tags
732 tags = self._tagscache.tags
732 for k, v in tags.iteritems():
733 for k, v in tags.iteritems():
733 try:
734 try:
734 # ignore tags to unknown nodes
735 # ignore tags to unknown nodes
735 self.changelog.rev(v)
736 self.changelog.rev(v)
736 t[k] = v
737 t[k] = v
737 except (error.LookupError, ValueError):
738 except (error.LookupError, ValueError):
738 pass
739 pass
739 return t
740 return t
740
741
741 def _findtags(self):
742 def _findtags(self):
742 '''Do the hard work of finding tags. Return a pair of dicts
743 '''Do the hard work of finding tags. Return a pair of dicts
743 (tags, tagtypes) where tags maps tag name to node, and tagtypes
744 (tags, tagtypes) where tags maps tag name to node, and tagtypes
744 maps tag name to a string like \'global\' or \'local\'.
745 maps tag name to a string like \'global\' or \'local\'.
745 Subclasses or extensions are free to add their own tags, but
746 Subclasses or extensions are free to add their own tags, but
746 should be aware that the returned dicts will be retained for the
747 should be aware that the returned dicts will be retained for the
747 duration of the localrepo object.'''
748 duration of the localrepo object.'''
748
749
749 # XXX what tagtype should subclasses/extensions use? Currently
750 # XXX what tagtype should subclasses/extensions use? Currently
750 # mq and bookmarks add tags, but do not set the tagtype at all.
751 # mq and bookmarks add tags, but do not set the tagtype at all.
751 # Should each extension invent its own tag type? Should there
752 # Should each extension invent its own tag type? Should there
752 # be one tagtype for all such "virtual" tags? Or is the status
753 # be one tagtype for all such "virtual" tags? Or is the status
753 # quo fine?
754 # quo fine?
754
755
755 alltags = {} # map tag name to (node, hist)
756 alltags = {} # map tag name to (node, hist)
756 tagtypes = {}
757 tagtypes = {}
757
758
758 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
759 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
759 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
760 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
760
761
761 # Build the return dicts. Have to re-encode tag names because
762 # Build the return dicts. Have to re-encode tag names because
762 # the tags module always uses UTF-8 (in order not to lose info
763 # the tags module always uses UTF-8 (in order not to lose info
763 # writing to the cache), but the rest of Mercurial wants them in
764 # writing to the cache), but the rest of Mercurial wants them in
764 # local encoding.
765 # local encoding.
765 tags = {}
766 tags = {}
766 for (name, (node, hist)) in alltags.iteritems():
767 for (name, (node, hist)) in alltags.iteritems():
767 if node != nullid:
768 if node != nullid:
768 tags[encoding.tolocal(name)] = node
769 tags[encoding.tolocal(name)] = node
769 tags['tip'] = self.changelog.tip()
770 tags['tip'] = self.changelog.tip()
770 tagtypes = dict([(encoding.tolocal(name), value)
771 tagtypes = dict([(encoding.tolocal(name), value)
771 for (name, value) in tagtypes.iteritems()])
772 for (name, value) in tagtypes.iteritems()])
772 return (tags, tagtypes)
773 return (tags, tagtypes)
773
774
774 def tagtype(self, tagname):
775 def tagtype(self, tagname):
775 '''
776 '''
776 return the type of the given tag. result can be:
777 return the type of the given tag. result can be:
777
778
778 'local' : a local tag
779 'local' : a local tag
779 'global' : a global tag
780 'global' : a global tag
780 None : tag does not exist
781 None : tag does not exist
781 '''
782 '''
782
783
783 return self._tagscache.tagtypes.get(tagname)
784 return self._tagscache.tagtypes.get(tagname)
784
785
785 def tagslist(self):
786 def tagslist(self):
786 '''return a list of tags ordered by revision'''
787 '''return a list of tags ordered by revision'''
787 if not self._tagscache.tagslist:
788 if not self._tagscache.tagslist:
788 l = []
789 l = []
789 for t, n in self.tags().iteritems():
790 for t, n in self.tags().iteritems():
790 l.append((self.changelog.rev(n), t, n))
791 l.append((self.changelog.rev(n), t, n))
791 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
792 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
792
793
793 return self._tagscache.tagslist
794 return self._tagscache.tagslist
794
795
795 def nodetags(self, node):
796 def nodetags(self, node):
796 '''return the tags associated with a node'''
797 '''return the tags associated with a node'''
797 if not self._tagscache.nodetagscache:
798 if not self._tagscache.nodetagscache:
798 nodetagscache = {}
799 nodetagscache = {}
799 for t, n in self._tagscache.tags.iteritems():
800 for t, n in self._tagscache.tags.iteritems():
800 nodetagscache.setdefault(n, []).append(t)
801 nodetagscache.setdefault(n, []).append(t)
801 for tags in nodetagscache.itervalues():
802 for tags in nodetagscache.itervalues():
802 tags.sort()
803 tags.sort()
803 self._tagscache.nodetagscache = nodetagscache
804 self._tagscache.nodetagscache = nodetagscache
804 return self._tagscache.nodetagscache.get(node, [])
805 return self._tagscache.nodetagscache.get(node, [])
805
806
806 def nodebookmarks(self, node):
807 def nodebookmarks(self, node):
807 """return the list of bookmarks pointing to the specified node"""
808 """return the list of bookmarks pointing to the specified node"""
808 marks = []
809 marks = []
809 for bookmark, n in self._bookmarks.iteritems():
810 for bookmark, n in self._bookmarks.iteritems():
810 if n == node:
811 if n == node:
811 marks.append(bookmark)
812 marks.append(bookmark)
812 return sorted(marks)
813 return sorted(marks)
813
814
814 def branchmap(self):
815 def branchmap(self):
815 '''returns a dictionary {branch: [branchheads]} with branchheads
816 '''returns a dictionary {branch: [branchheads]} with branchheads
816 ordered by increasing revision number'''
817 ordered by increasing revision number'''
817 branchmap.updatecache(self)
818 branchmap.updatecache(self)
818 return self._branchcaches[self.filtername]
819 return self._branchcaches[self.filtername]
819
820
820 @unfilteredmethod
821 @unfilteredmethod
821 def revbranchcache(self):
822 def revbranchcache(self):
822 if not self._revbranchcache:
823 if not self._revbranchcache:
823 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
824 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
824 return self._revbranchcache
825 return self._revbranchcache
825
826
826 def branchtip(self, branch, ignoremissing=False):
827 def branchtip(self, branch, ignoremissing=False):
827 '''return the tip node for a given branch
828 '''return the tip node for a given branch
828
829
829 If ignoremissing is True, then this method will not raise an error.
830 If ignoremissing is True, then this method will not raise an error.
830 This is helpful for callers that only expect None for a missing branch
831 This is helpful for callers that only expect None for a missing branch
831 (e.g. namespace).
832 (e.g. namespace).
832
833
833 '''
834 '''
834 try:
835 try:
835 return self.branchmap().branchtip(branch)
836 return self.branchmap().branchtip(branch)
836 except KeyError:
837 except KeyError:
837 if not ignoremissing:
838 if not ignoremissing:
838 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
839 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
839 else:
840 else:
840 pass
841 pass
841
842
842 def lookup(self, key):
843 def lookup(self, key):
843 return self[key].node()
844 return self[key].node()
844
845
845 def lookupbranch(self, key, remote=None):
846 def lookupbranch(self, key, remote=None):
846 repo = remote or self
847 repo = remote or self
847 if key in repo.branchmap():
848 if key in repo.branchmap():
848 return key
849 return key
849
850
850 repo = (remote and remote.local()) and remote or self
851 repo = (remote and remote.local()) and remote or self
851 return repo[key].branch()
852 return repo[key].branch()
852
853
853 def known(self, nodes):
854 def known(self, nodes):
854 cl = self.changelog
855 cl = self.changelog
855 nm = cl.nodemap
856 nm = cl.nodemap
856 filtered = cl.filteredrevs
857 filtered = cl.filteredrevs
857 result = []
858 result = []
858 for n in nodes:
859 for n in nodes:
859 r = nm.get(n)
860 r = nm.get(n)
860 resp = not (r is None or r in filtered)
861 resp = not (r is None or r in filtered)
861 result.append(resp)
862 result.append(resp)
862 return result
863 return result
863
864
864 def local(self):
865 def local(self):
865 return self
866 return self
866
867
867 def publishing(self):
868 def publishing(self):
868 # it's safe (and desirable) to trust the publish flag unconditionally
869 # it's safe (and desirable) to trust the publish flag unconditionally
869 # so that we don't finalize changes shared between users via ssh or nfs
870 # so that we don't finalize changes shared between users via ssh or nfs
870 return self.ui.configbool('phases', 'publish', True, untrusted=True)
871 return self.ui.configbool('phases', 'publish', True, untrusted=True)
871
872
872 def cancopy(self):
873 def cancopy(self):
873 # so statichttprepo's override of local() works
874 # so statichttprepo's override of local() works
874 if not self.local():
875 if not self.local():
875 return False
876 return False
876 if not self.publishing():
877 if not self.publishing():
877 return True
878 return True
878 # if publishing we can't copy if there is filtered content
879 # if publishing we can't copy if there is filtered content
879 return not self.filtered('visible').changelog.filteredrevs
880 return not self.filtered('visible').changelog.filteredrevs
880
881
881 def shared(self):
882 def shared(self):
882 '''the type of shared repository (None if not shared)'''
883 '''the type of shared repository (None if not shared)'''
883 if self.sharedpath != self.path:
884 if self.sharedpath != self.path:
884 return 'store'
885 return 'store'
885 return None
886 return None
886
887
887 def join(self, f, *insidef):
888 def join(self, f, *insidef):
888 return self.vfs.join(os.path.join(f, *insidef))
889 return self.vfs.join(os.path.join(f, *insidef))
889
890
890 def wjoin(self, f, *insidef):
891 def wjoin(self, f, *insidef):
891 return self.vfs.reljoin(self.root, f, *insidef)
892 return self.vfs.reljoin(self.root, f, *insidef)
892
893
893 def file(self, f):
894 def file(self, f):
894 if f[0] == '/':
895 if f[0] == '/':
895 f = f[1:]
896 f = f[1:]
896 return filelog.filelog(self.svfs, f)
897 return filelog.filelog(self.svfs, f)
897
898
898 def changectx(self, changeid):
899 def changectx(self, changeid):
899 return self[changeid]
900 return self[changeid]
900
901
    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

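    # Configuration sketch (illustrative only): the filters loaded by
    # _loadfilter() come from hgrc sections of the same name and are applied
    # by wread()/wwrite() as data crosses the working-directory boundary.
    # The pattern values name either a registered data filter or a shell
    # pipe command; the commands below are placeholders for whatever a
    # given setup actually provides:
    #
    #   [encode]
    #   **.txt = dos2unix
    #   [decode]
    #   **.txt = unix2dos
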
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be invoked explicitly here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) isn't
                # invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

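    # Usage sketch (illustrative only): callers must hold the store lock
    # before opening a transaction, and pair close() with release(), e.g.
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...            # mutate the store through tr
    #           tr.close()     # commit the transaction
    #       finally:
    #           tr.release()   # rolls back if close() was never reached
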
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

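    # Usage sketch (illustrative only): rollback() backs the 'hg rollback'
    # command; it undoes the repository's last transaction while the undo
    # files are still present, e.g.
    #
    #   if repo.rollback(dryrun=True) == 0:   # 0 means undo data exists
    #       repo.rollback(force=True)
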
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next access to check
        whether it was modified since the last time it was read, rereading
        it if it was.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but a
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisitions. Such
        # acquisitions would not cause a deadlock, as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

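    # Usage sketch (illustrative only): when both locks are needed, take
    # wlock before lock, mirroring the ordering rule in the docstrings:
    #
    #   with repo.wlock():       # working-directory lock first
    #       with repo.lock():    # then the store lock
    #           ...              # safe to touch both .hg and .hg/store
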
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            cmdutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

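    # Usage sketch (illustrative only): programmatic commit of whatever is
    # currently modified in the working directory, assuming `repo` is a
    # localrepository opened elsewhere:
    #
    #   node = repo.commit(text='fix frobnication', user='alice')
    #   if node is None:
    #       pass  # nothing to commit and ui.allowemptycommit was not set
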
1683 @unfilteredmethod
1684 @unfilteredmethod
1684 def commitctx(self, ctx, error=False):
1685 def commitctx(self, ctx, error=False):
1685 """Add a new revision to current repository.
1686 """Add a new revision to current repository.
1686 Revision information is passed via the context argument.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

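    # Illustrative sketch, not part of localrepo.py: commitctx() is normally
    # reached through workingctx (hg commit) or an in-memory memctx. A
    # hypothetical caller, assuming the ~4.0 context API, could look like:
    #
    #     def _example_commitctx(repo):
    #         from mercurial import context
    #         from mercurial.node import nullid
    #
    #         def getfilectx(repo, memctx, path):
    #             # return new content for `path`; None would mark it removed
    #             return context.memfilectx(repo, path, 'new contents\n')
    #
    #         mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                               'example commit message',
    #                               ['a.txt'], getfilectx, user='alice')
    #         return repo.commitctx(mctx)
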
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

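    # Illustrative sketch, not part of localrepo.py: a strip-like operation
    # is expected to bracket its destructive work with the two methods above,
    # under the repo lock (repair.strip() follows this pattern; `removerevs`
    # below is a hypothetical stand-in for the actual revlog truncation):
    #
    #     def _example_destroy(repo, removerevs):
    #         lock = repo.lock()
    #         try:
    #             repo.destroying()   # flush pending state (e.g. phasecache)
    #             removerevs(repo)    # destroy history
    #             repo.destroyed()    # repair phase, branchmap and tag caches
    #         finally:
    #             lock.release()
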
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

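    # Illustrative sketch, not part of localrepo.py: typical callers build a
    # matcher and then use the two convenience wrappers above. Hypothetical
    # usage, assuming the ~4.0 match API:
    #
    #     def _example_walkstatus(repo):
    #         from mercurial import match as matchmod
    #
    #         m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #         for f in repo.walk(m):          # working directory (node=None)
    #             repo.ui.write('%s\n' % f)
    #
    #         st = repo.status(ignored=True)  # scmutil.status tuple
    #         return st.modified, st.added, st.removed
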
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

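    # Illustrative sketch, not part of localrepo.py: hypothetical usage of
    # the two head queries above.
    #
    #     def _example_heads(repo):
    #         tips = repo.heads()            # all heads, newest first
    #         for h in repo.branchheads('default', closed=True):
    #             repo.ui.write('%s\n' % repo[h].hex())
    #         return tips
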
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

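    # Standalone model of the loop above, not part of localrepo.py: for each
    # starting node, branches() slides down first parents to the bottom of
    # the linear run and reports (start, bottom, bottom_p1, bottom_p2). With
    # integer nodes, 0 playing the role of nullid, and a chain 1 <- 2 <- 3:
    #
    #     def _example_branches():
    #         parents = {3: (2, 0), 2: (1, 0), 1: (0, 0)}
    #         b = []
    #         for n in [3]:
    #             t = n
    #             while True:
    #                 p = parents[n]
    #                 if p[1] != 0 or p[0] == 0:
    #                     b.append((t, n, p[0], p[1]))
    #                     break
    #                 n = p[0]
    #         assert b == [(3, 1, 0, 0)]  # the chain collapses to one entry
    #         return b
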
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

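    # Standalone model of between(), not part of localrepo.py: nodes along
    # the first-parent chain are sampled at exponentially growing depths
    # 1, 2, 4, 8, ... below `top`, which is what the legacy discovery
    # protocol uses to bisect toward a common ancestor in O(log n) samples.
    # Integer nodes, 0 standing in for nullid and the bottom of the chain:
    #
    #     def _example_between():
    #         firstparent = dict((n, n - 1) for n in range(1, 11))
    #         top, bottom = 10, 0
    #         n, l, i, f = top, [], 0, 1
    #         while n != bottom and n != 0:
    #             p = firstparent[n]
    #             if i == f:
    #                 l.append(n)
    #                 f *= 2
    #             n = p
    #             i += 1
    #         assert l == [9, 8, 6, 2]  # depths 1, 2, 4, 8 below the top
    #         return l
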
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object; registered hooks are called with a
        pushop (carrying the repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

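    # Illustrative sketch, not part of localrepo.py: a hypothetical extension
    # can veto pushes either by overriding checkpush() or, more commonly, by
    # registering a prepush hook; each hook is called with the pushop.
    #
    #     def _example_prepushsetup(repo):
    #         from mercurial import error
    #
    #         def checkoutgoing(pushop):
    #             for node in pushop.outgoing.missing:
    #                 if 'WIP' in pushop.repo[node].description():
    #                     raise error.Abort('refusing to push WIP changeset')
    #
    #         repo.prepushoutgoinghooks.add('examplecheck', checkoutgoing)
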
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

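    # Illustrative sketch, not part of localrepo.py: bookmarks are a built-in
    # pushkey namespace, so both methods above can be exercised directly.
    #
    #     def _example_pushkey(repo):
    #         from mercurial.node import hex
    #
    #         marks = repo.listkeys('bookmarks')           # {name: hex node}
    #         return repo.pushkey('bookmarks', 'stable',
    #                             marks.get('stable', ''), # expected old value
    #                             hex(repo['tip'].node())) # new value
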
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

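    # Illustrative sketch, not part of localrepo.py: savecommitmessage() is
    # what lets an aborted `hg commit` recover its message later from
    # .hg/last-message.txt.
    #
    #     def _example_savemessage(repo):
    #         relpath = repo.savecommitmessage('WIP: draft message\n')
    #         repo.ui.status('message saved to %s\n' % relpath)
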
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

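# A short worked example, not part of localrepo.py: undo files simply swap
# the 'journal' prefix for 'undo' (POSIX paths assumed here).
#
#     def _example_undoname():
#         assert undoname('.hg/store/journal') == '.hg/store/undo'
#         assert undoname('journal.dirstate') == 'undo.dirstate'
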
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
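
# Illustrative sketch, not part of localrepo.py: the docstring above invites
# extensions to wrap this function; a hypothetical extension could do so via
# the standard wrapfunction API to inject its own requirement.
#
#     def _example_uisetup(ui):
#         from mercurial import extensions, localrepo
#
#         def wrapped(orig, repo):
#             requirements = orig(repo)
#             if repo.ui.configbool('exampleext', 'enabled'):
#                 requirements.add('exampleext-v1')
#             return requirements
#
#         extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)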