##// END OF EJS Templates
manifest: add manifestlog.add...
Durham Goode -
r29962:6b5a9a01 default
parent child Browse files
Show More
@@ -1,1996 +1,1996 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 cmdutil,
31 cmdutil,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 encoding,
34 encoding,
35 error,
35 error,
36 exchange,
36 exchange,
37 extensions,
37 extensions,
38 filelog,
38 filelog,
39 hook,
39 hook,
40 lock as lockmod,
40 lock as lockmod,
41 manifest,
41 manifest,
42 match as matchmod,
42 match as matchmod,
43 merge as mergemod,
43 merge as mergemod,
44 namespaces,
44 namespaces,
45 obsolete,
45 obsolete,
46 pathutil,
46 pathutil,
47 peer,
47 peer,
48 phases,
48 phases,
49 pushkey,
49 pushkey,
50 repoview,
50 repoview,
51 revset,
51 revset,
52 scmutil,
52 scmutil,
53 store,
53 store,
54 subrepo,
54 subrepo,
55 tags as tagsmod,
55 tags as tagsmod,
56 transaction,
56 transaction,
57 util,
57 util,
58 )
58 )
59
59
# module-level convenience aliases used throughout this file
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq
63
63
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # descriptor accessed on the class itself: return the descriptor
        if repo is None:
            return self
        # always resolve against the unfiltered repo so every filtered
        # view shares a single cached value
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
76
76
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve paths relative to .hg/store rather than .hg/
        return obj.sjoin(fname)
81
81
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache on this repo
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: delegate to the value cached on the unfiltered repo
        return getattr(unfi, self.name)
90
90
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # cache on the view object itself (not the unfiltered repo): the
        # computed value may differ between filtered views
        object.__setattr__(obj, self.name, value)
96
96
97
97
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
101
101
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # redirect the call to the unfiltered repo so repoview filtering
        # never applies inside the wrapped method
        return orig(repo.unfiltered(), *args, **kwargs)
    # preserve the wrapped function's identity so tracebacks, profiles and
    # introspection show the real method name instead of 'wrapper'
    wrapper.__name__ = orig.__name__
    wrapper.__doc__ = orig.__doc__
    return wrapper
107
107
# capabilities advertised by localpeer (wire-protocol style names)
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
# legacy peers additionally expose the pre-getbundle changegroupsubset call
legacycaps = moderncaps.union(set(['changegroupsubset']))
111
111
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # the peer operates on the 'served' filtered view of the repo
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # re-raise the original failure after flushing the output
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
214
214
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
233
233
class localrepository(object):

    # requirements understood by this code that affect the on-disk format
    # (forwarded to the store opener via _applyopenerreqs)
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the repoview filter in effect; None means unfiltered
    # (overridden by the proxy classes built in filtered())
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
246
246
    def __init__(self, baseui, path=None, create=False):
        """Open (or, when *create* is true, initialize) the repository at
        *path*, loading its configuration, requirements and store."""
        self.requirements = set()
        # working-directory vfs, rooted at the repo root
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        # .hg/ vfs
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc: proceed with the base configuration
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run hooks belonging to currently enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # a missing requires file means an old/empty repo; any
                # other error is fatal
                if inst.errno != errno.ENOENT:
                    raise

        # honor .hg/sharedpath when the store lives in another repo
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
359
359
    def close(self):
        # flush any pending write-back caches before the repo goes away
        self._writecaches()
362
362
    def _writecaches(self):
        # currently only the rev branch cache needs explicit write-back
        if self._revbranchcache:
            self._revbranchcache.write()
366
366
    def _restrictcapabilities(self, caps):
        """Return *caps* possibly extended with the bundle2 capability blob."""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            # bundle2 caps are URL-quoted and embedded in a single entry
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
373
373
    def _applyopenerreqs(self):
        """Propagate requirements and format config to the store opener
        options consumed by the revlog layer."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
394
394
    def _writerequirements(self):
        # persist self.requirements to .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
397
397
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # direct subrepository of this repo
                    return True
                else:
                    # nested deeper: delegate the remainder of the path to
                    # the subrepo containing it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try a shorter prefix
                parts.pop()
        return False
435
435
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle
438
438
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
444
444
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
452
452
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # cached, invalidated when either bookmarks file changes
        return bookmarks.bmstore(self)
456
456
    @property
    def _activebookmark(self):
        # the currently active bookmark, delegated to the bookmark store
        return self._bookmarks.active
460
460
461 def bookmarkheads(self, bookmark):
461 def bookmarkheads(self, bookmark):
462 name = bookmark.split('@', 1)[0]
462 name = bookmark.split('@', 1)[0]
463 heads = []
463 heads = []
464 for mark, n in self._bookmarks.iteritems():
464 for mark, n in self._bookmarks.iteritems():
465 if mark.split('@', 1)[0] == name:
465 if mark.split('@', 1)[0] == name:
466 heads.append(n)
466 heads.append(n)
467 return heads
467 return heads
468
468
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)
475
475
    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist on disk but the feature is off: warn, don't fail
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
493
493
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                # a transaction is pending for this repo: also read the
                # not-yet-committed data from 00changelog.i.a
                c.readpending('00changelog.i.a')
        return c
502
502
    @storecache('00manifest.i')
    def manifest(self):
        # cached, invalidated when 00manifest.i changes
        return manifest.manifest(self.svfs)
506
506
    @property
    def manifestlog(self):
        # NOTE(review): plain property, so a fresh manifestlog is built on
        # every access (not cached like self.manifest above)
        return manifest.manifestlog(self.svfs, self)
510
510
511 @repofilecache('dirstate')
511 @repofilecache('dirstate')
512 def dirstate(self):
512 def dirstate(self):
513 return dirstate.dirstate(self.vfs, self.ui, self.root,
513 return dirstate.dirstate(self.vfs, self.ui, self.root,
514 self._dirstatevalidate)
514 self._dirstatevalidate)
515
515
516 def _dirstatevalidate(self, node):
516 def _dirstatevalidate(self, node):
517 try:
517 try:
518 self.changelog.rev(node)
518 self.changelog.rev(node)
519 return node
519 return node
520 except error.LookupError:
520 except error.LookupError:
521 if not self._dirstatevalidatewarned:
521 if not self._dirstatevalidatewarned:
522 self._dirstatevalidatewarned = True
522 self._dirstatevalidatewarned = True
523 self.ui.warn(_("warning: ignoring unknown"
523 self.ui.warn(_("warning: ignoring unknown"
524 " working parent %s!\n") % short(node))
524 " working parent %s!\n") % short(node))
525 return nullid
525 return nullid
526
526
527 def __getitem__(self, changeid):
527 def __getitem__(self, changeid):
528 if changeid is None or changeid == wdirrev:
528 if changeid is None or changeid == wdirrev:
529 return context.workingctx(self)
529 return context.workingctx(self)
530 if isinstance(changeid, slice):
530 if isinstance(changeid, slice):
531 return [context.changectx(self, i)
531 return [context.changectx(self, i)
532 for i in xrange(*changeid.indices(len(self)))
532 for i in xrange(*changeid.indices(len(self)))
533 if i not in self.changelog.filteredrevs]
533 if i not in self.changelog.filteredrevs]
534 return context.changectx(self, changeid)
534 return context.changectx(self, changeid)
535
535
536 def __contains__(self, changeid):
536 def __contains__(self, changeid):
537 try:
537 try:
538 self[changeid]
538 self[changeid]
539 return True
539 return True
540 except error.RepoLookupError:
540 except error.RepoLookupError:
541 return False
541 return False
542
542
543 def __nonzero__(self):
543 def __nonzero__(self):
544 return True
544 return True
545
545
546 def __len__(self):
546 def __len__(self):
547 return len(self.changelog)
547 return len(self.changelog)
548
548
549 def __iter__(self):
549 def __iter__(self):
550 return iter(self.changelog)
550 return iter(self.changelog)
551
551
552 def revs(self, expr, *args):
552 def revs(self, expr, *args):
553 '''Find revisions matching a revset.
553 '''Find revisions matching a revset.
554
554
555 The revset is specified as a string ``expr`` that may contain
555 The revset is specified as a string ``expr`` that may contain
556 %-formatting to escape certain types. See ``revset.formatspec``.
556 %-formatting to escape certain types. See ``revset.formatspec``.
557
557
558 Revset aliases from the configuration are not expanded. To expand
558 Revset aliases from the configuration are not expanded. To expand
559 user aliases, consider calling ``scmutil.revrange()``.
559 user aliases, consider calling ``scmutil.revrange()``.
560
560
561 Returns a revset.abstractsmartset, which is a list-like interface
561 Returns a revset.abstractsmartset, which is a list-like interface
562 that contains integer revisions.
562 that contains integer revisions.
563 '''
563 '''
564 expr = revset.formatspec(expr, *args)
564 expr = revset.formatspec(expr, *args)
565 m = revset.match(None, expr)
565 m = revset.match(None, expr)
566 return m(self)
566 return m(self)
567
567
568 def set(self, expr, *args):
568 def set(self, expr, *args):
569 '''Find revisions matching a revset and emit changectx instances.
569 '''Find revisions matching a revset and emit changectx instances.
570
570
571 This is a convenience wrapper around ``revs()`` that iterates the
571 This is a convenience wrapper around ``revs()`` that iterates the
572 result and is a generator of changectx instances.
572 result and is a generator of changectx instances.
573
573
574 Revset aliases from the configuration are not expanded. To expand
574 Revset aliases from the configuration are not expanded. To expand
575 user aliases, consider calling ``scmutil.revrange()``.
575 user aliases, consider calling ``scmutil.revrange()``.
576 '''
576 '''
577 for r in self.revs(expr, *args):
577 for r in self.revs(expr, *args):
578 yield self[r]
578 yield self[r]
579
579
580 def url(self):
580 def url(self):
581 return 'file:' + self.root
581 return 'file:' + self.root
582
582
583 def hook(self, name, throw=False, **args):
583 def hook(self, name, throw=False, **args):
584 """Call a hook, passing this repo instance.
584 """Call a hook, passing this repo instance.
585
585
586 This a convenience method to aid invoking hooks. Extensions likely
586 This a convenience method to aid invoking hooks. Extensions likely
587 won't call this unless they have registered a custom hook or are
587 won't call this unless they have registered a custom hook or are
588 replacing code that is expected to call a hook.
588 replacing code that is expected to call a hook.
589 """
589 """
590 return hook.hook(self.ui, self, name, throw, **args)
590 return hook.hook(self.ui, self, name, throw, **args)
591
591
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Worker for tag(): record ``names`` for ``node``.

        Fires the 'pretag' hook (which may abort) for each name, then
        either appends to .hg/localtags (local=True, no commit, returns
        None) or rewrites .hgtags and commits that change, returning the
        new changeset's node.  The 'tag' hook fires once per name in
        both cases.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            # 'pretag' may veto the operation by raising (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append records at EOF, ensuring the previous content ends
            # with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # name already tagged: record the old node first so
                    # the history of the tag is preserved in the file
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                # no localtags file yet; create it
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            # local tags are not committed; done
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
665
665
666 def tag(self, names, node, message, local, user, date, editor=False):
666 def tag(self, names, node, message, local, user, date, editor=False):
667 '''tag a revision with one or more symbolic names.
667 '''tag a revision with one or more symbolic names.
668
668
669 names is a list of strings or, when adding a single tag, names may be a
669 names is a list of strings or, when adding a single tag, names may be a
670 string.
670 string.
671
671
672 if local is True, the tags are stored in a per-repository file.
672 if local is True, the tags are stored in a per-repository file.
673 otherwise, they are stored in the .hgtags file, and a new
673 otherwise, they are stored in the .hgtags file, and a new
674 changeset is committed with the change.
674 changeset is committed with the change.
675
675
676 keyword arguments:
676 keyword arguments:
677
677
678 local: whether to store tags in non-version-controlled file
678 local: whether to store tags in non-version-controlled file
679 (default False)
679 (default False)
680
680
681 message: commit message to use if committing
681 message: commit message to use if committing
682
682
683 user: name of user to use if committing
683 user: name of user to use if committing
684
684
685 date: date tuple to use if committing'''
685 date: date tuple to use if committing'''
686
686
687 if not local:
687 if not local:
688 m = matchmod.exact(self.root, '', ['.hgtags'])
688 m = matchmod.exact(self.root, '', ['.hgtags'])
689 if any(self.status(match=m, unknown=True, ignored=True)):
689 if any(self.status(match=m, unknown=True, ignored=True)):
690 raise error.Abort(_('working copy of .hgtags is changed'),
690 raise error.Abort(_('working copy of .hgtags is changed'),
691 hint=_('please commit .hgtags manually'))
691 hint=_('please commit .hgtags manually'))
692
692
693 self.tags() # instantiate the cache
693 self.tags() # instantiate the cache
694 self._tag(names, node, message, local, user, date, editor=editor)
694 self._tag(names, node, message, local, user, date, editor=editor)
695
695
696 @filteredpropertycache
696 @filteredpropertycache
697 def _tagscache(self):
697 def _tagscache(self):
698 '''Returns a tagscache object that contains various tags related
698 '''Returns a tagscache object that contains various tags related
699 caches.'''
699 caches.'''
700
700
701 # This simplifies its cache management by having one decorated
701 # This simplifies its cache management by having one decorated
702 # function (this one) and the rest simply fetch things from it.
702 # function (this one) and the rest simply fetch things from it.
703 class tagscache(object):
703 class tagscache(object):
704 def __init__(self):
704 def __init__(self):
705 # These two define the set of tags for this repository. tags
705 # These two define the set of tags for this repository. tags
706 # maps tag name to node; tagtypes maps tag name to 'global' or
706 # maps tag name to node; tagtypes maps tag name to 'global' or
707 # 'local'. (Global tags are defined by .hgtags across all
707 # 'local'. (Global tags are defined by .hgtags across all
708 # heads, and local tags are defined in .hg/localtags.)
708 # heads, and local tags are defined in .hg/localtags.)
709 # They constitute the in-memory cache of tags.
709 # They constitute the in-memory cache of tags.
710 self.tags = self.tagtypes = None
710 self.tags = self.tagtypes = None
711
711
712 self.nodetagscache = self.tagslist = None
712 self.nodetagscache = self.tagslist = None
713
713
714 cache = tagscache()
714 cache = tagscache()
715 cache.tags, cache.tagtypes = self._findtags()
715 cache.tags, cache.tagtypes = self._findtags()
716
716
717 return cache
717 return cache
718
718
719 def tags(self):
719 def tags(self):
720 '''return a mapping of tag to node'''
720 '''return a mapping of tag to node'''
721 t = {}
721 t = {}
722 if self.changelog.filteredrevs:
722 if self.changelog.filteredrevs:
723 tags, tt = self._findtags()
723 tags, tt = self._findtags()
724 else:
724 else:
725 tags = self._tagscache.tags
725 tags = self._tagscache.tags
726 for k, v in tags.iteritems():
726 for k, v in tags.iteritems():
727 try:
727 try:
728 # ignore tags to unknown nodes
728 # ignore tags to unknown nodes
729 self.changelog.rev(v)
729 self.changelog.rev(v)
730 t[k] = v
730 t[k] = v
731 except (error.LookupError, ValueError):
731 except (error.LookupError, ValueError):
732 pass
732 pass
733 return t
733 return t
734
734
735 def _findtags(self):
735 def _findtags(self):
736 '''Do the hard work of finding tags. Return a pair of dicts
736 '''Do the hard work of finding tags. Return a pair of dicts
737 (tags, tagtypes) where tags maps tag name to node, and tagtypes
737 (tags, tagtypes) where tags maps tag name to node, and tagtypes
738 maps tag name to a string like \'global\' or \'local\'.
738 maps tag name to a string like \'global\' or \'local\'.
739 Subclasses or extensions are free to add their own tags, but
739 Subclasses or extensions are free to add their own tags, but
740 should be aware that the returned dicts will be retained for the
740 should be aware that the returned dicts will be retained for the
741 duration of the localrepo object.'''
741 duration of the localrepo object.'''
742
742
743 # XXX what tagtype should subclasses/extensions use? Currently
743 # XXX what tagtype should subclasses/extensions use? Currently
744 # mq and bookmarks add tags, but do not set the tagtype at all.
744 # mq and bookmarks add tags, but do not set the tagtype at all.
745 # Should each extension invent its own tag type? Should there
745 # Should each extension invent its own tag type? Should there
746 # be one tagtype for all such "virtual" tags? Or is the status
746 # be one tagtype for all such "virtual" tags? Or is the status
747 # quo fine?
747 # quo fine?
748
748
749 alltags = {} # map tag name to (node, hist)
749 alltags = {} # map tag name to (node, hist)
750 tagtypes = {}
750 tagtypes = {}
751
751
752 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
752 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
753 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
753 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
754
754
755 # Build the return dicts. Have to re-encode tag names because
755 # Build the return dicts. Have to re-encode tag names because
756 # the tags module always uses UTF-8 (in order not to lose info
756 # the tags module always uses UTF-8 (in order not to lose info
757 # writing to the cache), but the rest of Mercurial wants them in
757 # writing to the cache), but the rest of Mercurial wants them in
758 # local encoding.
758 # local encoding.
759 tags = {}
759 tags = {}
760 for (name, (node, hist)) in alltags.iteritems():
760 for (name, (node, hist)) in alltags.iteritems():
761 if node != nullid:
761 if node != nullid:
762 tags[encoding.tolocal(name)] = node
762 tags[encoding.tolocal(name)] = node
763 tags['tip'] = self.changelog.tip()
763 tags['tip'] = self.changelog.tip()
764 tagtypes = dict([(encoding.tolocal(name), value)
764 tagtypes = dict([(encoding.tolocal(name), value)
765 for (name, value) in tagtypes.iteritems()])
765 for (name, value) in tagtypes.iteritems()])
766 return (tags, tagtypes)
766 return (tags, tagtypes)
767
767
768 def tagtype(self, tagname):
768 def tagtype(self, tagname):
769 '''
769 '''
770 return the type of the given tag. result can be:
770 return the type of the given tag. result can be:
771
771
772 'local' : a local tag
772 'local' : a local tag
773 'global' : a global tag
773 'global' : a global tag
774 None : tag does not exist
774 None : tag does not exist
775 '''
775 '''
776
776
777 return self._tagscache.tagtypes.get(tagname)
777 return self._tagscache.tagtypes.get(tagname)
778
778
779 def tagslist(self):
779 def tagslist(self):
780 '''return a list of tags ordered by revision'''
780 '''return a list of tags ordered by revision'''
781 if not self._tagscache.tagslist:
781 if not self._tagscache.tagslist:
782 l = []
782 l = []
783 for t, n in self.tags().iteritems():
783 for t, n in self.tags().iteritems():
784 l.append((self.changelog.rev(n), t, n))
784 l.append((self.changelog.rev(n), t, n))
785 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
785 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
786
786
787 return self._tagscache.tagslist
787 return self._tagscache.tagslist
788
788
789 def nodetags(self, node):
789 def nodetags(self, node):
790 '''return the tags associated with a node'''
790 '''return the tags associated with a node'''
791 if not self._tagscache.nodetagscache:
791 if not self._tagscache.nodetagscache:
792 nodetagscache = {}
792 nodetagscache = {}
793 for t, n in self._tagscache.tags.iteritems():
793 for t, n in self._tagscache.tags.iteritems():
794 nodetagscache.setdefault(n, []).append(t)
794 nodetagscache.setdefault(n, []).append(t)
795 for tags in nodetagscache.itervalues():
795 for tags in nodetagscache.itervalues():
796 tags.sort()
796 tags.sort()
797 self._tagscache.nodetagscache = nodetagscache
797 self._tagscache.nodetagscache = nodetagscache
798 return self._tagscache.nodetagscache.get(node, [])
798 return self._tagscache.nodetagscache.get(node, [])
799
799
800 def nodebookmarks(self, node):
800 def nodebookmarks(self, node):
801 """return the list of bookmarks pointing to the specified node"""
801 """return the list of bookmarks pointing to the specified node"""
802 marks = []
802 marks = []
803 for bookmark, n in self._bookmarks.iteritems():
803 for bookmark, n in self._bookmarks.iteritems():
804 if n == node:
804 if n == node:
805 marks.append(bookmark)
805 marks.append(bookmark)
806 return sorted(marks)
806 return sorted(marks)
807
807
808 def branchmap(self):
808 def branchmap(self):
809 '''returns a dictionary {branch: [branchheads]} with branchheads
809 '''returns a dictionary {branch: [branchheads]} with branchheads
810 ordered by increasing revision number'''
810 ordered by increasing revision number'''
811 branchmap.updatecache(self)
811 branchmap.updatecache(self)
812 return self._branchcaches[self.filtername]
812 return self._branchcaches[self.filtername]
813
813
814 @unfilteredmethod
814 @unfilteredmethod
815 def revbranchcache(self):
815 def revbranchcache(self):
816 if not self._revbranchcache:
816 if not self._revbranchcache:
817 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
817 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
818 return self._revbranchcache
818 return self._revbranchcache
819
819
820 def branchtip(self, branch, ignoremissing=False):
820 def branchtip(self, branch, ignoremissing=False):
821 '''return the tip node for a given branch
821 '''return the tip node for a given branch
822
822
823 If ignoremissing is True, then this method will not raise an error.
823 If ignoremissing is True, then this method will not raise an error.
824 This is helpful for callers that only expect None for a missing branch
824 This is helpful for callers that only expect None for a missing branch
825 (e.g. namespace).
825 (e.g. namespace).
826
826
827 '''
827 '''
828 try:
828 try:
829 return self.branchmap().branchtip(branch)
829 return self.branchmap().branchtip(branch)
830 except KeyError:
830 except KeyError:
831 if not ignoremissing:
831 if not ignoremissing:
832 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
832 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
833 else:
833 else:
834 pass
834 pass
835
835
836 def lookup(self, key):
836 def lookup(self, key):
837 return self[key].node()
837 return self[key].node()
838
838
839 def lookupbranch(self, key, remote=None):
839 def lookupbranch(self, key, remote=None):
840 repo = remote or self
840 repo = remote or self
841 if key in repo.branchmap():
841 if key in repo.branchmap():
842 return key
842 return key
843
843
844 repo = (remote and remote.local()) and remote or self
844 repo = (remote and remote.local()) and remote or self
845 return repo[key].branch()
845 return repo[key].branch()
846
846
847 def known(self, nodes):
847 def known(self, nodes):
848 cl = self.changelog
848 cl = self.changelog
849 nm = cl.nodemap
849 nm = cl.nodemap
850 filtered = cl.filteredrevs
850 filtered = cl.filteredrevs
851 result = []
851 result = []
852 for n in nodes:
852 for n in nodes:
853 r = nm.get(n)
853 r = nm.get(n)
854 resp = not (r is None or r in filtered)
854 resp = not (r is None or r in filtered)
855 result.append(resp)
855 result.append(resp)
856 return result
856 return result
857
857
858 def local(self):
858 def local(self):
859 return self
859 return self
860
860
861 def publishing(self):
861 def publishing(self):
862 # it's safe (and desirable) to trust the publish flag unconditionally
862 # it's safe (and desirable) to trust the publish flag unconditionally
863 # so that we don't finalize changes shared between users via ssh or nfs
863 # so that we don't finalize changes shared between users via ssh or nfs
864 return self.ui.configbool('phases', 'publish', True, untrusted=True)
864 return self.ui.configbool('phases', 'publish', True, untrusted=True)
865
865
866 def cancopy(self):
866 def cancopy(self):
867 # so statichttprepo's override of local() works
867 # so statichttprepo's override of local() works
868 if not self.local():
868 if not self.local():
869 return False
869 return False
870 if not self.publishing():
870 if not self.publishing():
871 return True
871 return True
872 # if publishing we can't copy if there is filtered content
872 # if publishing we can't copy if there is filtered content
873 return not self.filtered('visible').changelog.filteredrevs
873 return not self.filtered('visible').changelog.filteredrevs
874
874
875 def shared(self):
875 def shared(self):
876 '''the type of shared repository (None if not shared)'''
876 '''the type of shared repository (None if not shared)'''
877 if self.sharedpath != self.path:
877 if self.sharedpath != self.path:
878 return 'store'
878 return 'store'
879 return None
879 return None
880
880
881 def join(self, f, *insidef):
881 def join(self, f, *insidef):
882 return self.vfs.join(os.path.join(f, *insidef))
882 return self.vfs.join(os.path.join(f, *insidef))
883
883
884 def wjoin(self, f, *insidef):
884 def wjoin(self, f, *insidef):
885 return self.vfs.reljoin(self.root, f, *insidef)
885 return self.vfs.reljoin(self.root, f, *insidef)
886
886
887 def file(self, f):
887 def file(self, f):
888 if f[0] == '/':
888 if f[0] == '/':
889 f = f[1:]
889 f = f[1:]
890 return filelog.filelog(self.svfs, f)
890 return filelog.filelog(self.svfs, f)
891
891
892 def changectx(self, changeid):
892 def changectx(self, changeid):
893 return self[changeid]
893 return self[changeid]
894
894
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to p1 (and optionally p2).

        Wraps the dirstate parent change so copy records can be fixed
        up; the dirstate cannot do that by itself because it requires
        access to the parents' manifests.
        """
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # with a single parent, stale copy sources pointing outside
            # p1 are dropped
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
911
911
912 def filectx(self, path, changeid=None, fileid=None):
912 def filectx(self, path, changeid=None, fileid=None):
913 """changeid can be a changeset revision, node, or tag.
913 """changeid can be a changeset revision, node, or tag.
914 fileid can be a file revision or node."""
914 fileid can be a file revision or node."""
915 return context.filectx(self, path, changeid, fileid)
915 return context.filectx(self, path, changeid, fileid)
916
916
917 def getcwd(self):
917 def getcwd(self):
918 return self.dirstate.getcwd()
918 return self.dirstate.getcwd()
919
919
920 def pathto(self, f, cwd=None):
920 def pathto(self, f, cwd=None):
921 return self.dirstate.pathto(f, cwd)
921 return self.dirstate.pathto(f, cwd)
922
922
923 def wfile(self, f, mode='r'):
923 def wfile(self, f, mode='r'):
924 return self.wvfs(f, mode)
924 return self.wvfs(f, mode)
925
925
926 def _link(self, f):
926 def _link(self, f):
927 return self.wvfs.islink(f)
927 return self.wvfs.islink(f)
928
928
929 def _loadfilter(self, filter):
929 def _loadfilter(self, filter):
930 if filter not in self.filterpats:
930 if filter not in self.filterpats:
931 l = []
931 l = []
932 for pat, cmd in self.ui.configitems(filter):
932 for pat, cmd in self.ui.configitems(filter):
933 if cmd == '!':
933 if cmd == '!':
934 continue
934 continue
935 mf = matchmod.match(self.root, '', [pat])
935 mf = matchmod.match(self.root, '', [pat])
936 fn = None
936 fn = None
937 params = cmd
937 params = cmd
938 for name, filterfn in self._datafilters.iteritems():
938 for name, filterfn in self._datafilters.iteritems():
939 if cmd.startswith(name):
939 if cmd.startswith(name):
940 fn = filterfn
940 fn = filterfn
941 params = cmd[len(name):].lstrip()
941 params = cmd[len(name):].lstrip()
942 break
942 break
943 if not fn:
943 if not fn:
944 fn = lambda s, c, **kwargs: util.filter(s, c)
944 fn = lambda s, c, **kwargs: util.filter(s, c)
945 # Wrap old filters not supporting keyword arguments
945 # Wrap old filters not supporting keyword arguments
946 if not inspect.getargspec(fn)[2]:
946 if not inspect.getargspec(fn)[2]:
947 oldfn = fn
947 oldfn = fn
948 fn = lambda s, c, **kwargs: oldfn(s, c)
948 fn = lambda s, c, **kwargs: oldfn(s, c)
949 l.append((mf, fn, params))
949 l.append((mf, fn, params))
950 self.filterpats[filter] = l
950 self.filterpats[filter] = l
951 return self.filterpats[filter]
951 return self.filterpats[filter]
952
952
953 def _filter(self, filterpats, filename, data):
953 def _filter(self, filterpats, filename, data):
954 for mf, fn, cmd in filterpats:
954 for mf, fn, cmd in filterpats:
955 if mf(filename):
955 if mf(filename):
956 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
956 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
957 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
957 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
958 break
958 break
959
959
960 return data
960 return data
961
961
962 @unfilteredpropertycache
962 @unfilteredpropertycache
963 def _encodefilterpats(self):
963 def _encodefilterpats(self):
964 return self._loadfilter('encode')
964 return self._loadfilter('encode')
965
965
966 @unfilteredpropertycache
966 @unfilteredpropertycache
967 def _decodefilterpats(self):
967 def _decodefilterpats(self):
968 return self._loadfilter('decode')
968 return self._loadfilter('decode')
969
969
970 def adddatafilter(self, name, filter):
970 def adddatafilter(self, name, filter):
971 self._datafilters[name] = filter
971 self._datafilters[name] = filter
972
972
973 def wread(self, filename):
973 def wread(self, filename):
974 if self._link(filename):
974 if self._link(filename):
975 data = self.wvfs.readlink(filename)
975 data = self.wvfs.readlink(filename)
976 else:
976 else:
977 data = self.wvfs.read(filename)
977 data = self.wvfs.read(filename)
978 return self._filter(self._encodefilterpats, filename, data)
978 return self._filter(self._encodefilterpats, filename, data)
979
979
980 def wwrite(self, filename, data, flags, backgroundclose=False):
980 def wwrite(self, filename, data, flags, backgroundclose=False):
981 """write ``data`` into ``filename`` in the working directory
981 """write ``data`` into ``filename`` in the working directory
982
982
983 This returns length of written (maybe decoded) data.
983 This returns length of written (maybe decoded) data.
984 """
984 """
985 data = self._filter(self._decodefilterpats, filename, data)
985 data = self._filter(self._decodefilterpats, filename, data)
986 if 'l' in flags:
986 if 'l' in flags:
987 self.wvfs.symlink(data, filename)
987 self.wvfs.symlink(data, filename)
988 else:
988 else:
989 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
989 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
990 if 'x' in flags:
990 if 'x' in flags:
991 self.wvfs.setflags(filename, False, True)
991 self.wvfs.setflags(filename, False, True)
992 return len(data)
992 return len(data)
993
993
994 def wwritedata(self, filename, data):
994 def wwritedata(self, filename, data):
995 return self._filter(self._decodefilterpats, filename, data)
995 return self._filter(self._decodefilterpats, filename, data)
996
996
997 def currenttransaction(self):
997 def currenttransaction(self):
998 """return the current transaction or None if non exists"""
998 """return the current transaction or None if non exists"""
999 if self._transref:
999 if self._transref:
1000 tr = self._transref()
1000 tr = self._transref()
1001 else:
1001 else:
1002 tr = None
1002 tr = None
1003
1003
1004 if tr and tr.running():
1004 if tr and tr.running():
1005 return tr
1005 return tr
1006 return None
1006 return None
1007
1007
def transaction(self, desc, report=None):
    """Open a transaction on the repository and return it.

    If a transaction is already running, return a nested transaction
    instead.  *desc* names the transaction for hooks and the journal;
    *report* optionally overrides ui.warn as the rollback-message
    callback.  The store lock must be held (enforced when the devel
    'check-locks'/'all-warnings' options are set).  Raises
    error.RepoError when an abandoned journal is found on disk.
    """
    if (self.ui.configbool('devel', 'all-warnings')
        or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is None:
            raise RuntimeError('programming error: transaction requires '
                               'locking')
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    # pseudo-unique id for this transaction, exposed to hooks as 'txnid'
    idbase = "%.40f#%f" % (random.random(), time.time())
    txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
    self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

    self._writejournal(desc)
    # journal.* files get renamed to their undo.* names on success
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    def validate(tr):
        """will run pre-closing hooks"""
        reporef().hook('pretxnclose', throw=True,
                       txnname=desc, **tr.hookargs)
    def releasefn(tr, success):
        repo = reporef()
        if success:
            # this should be explicitly invoked here, because
            # in-memory changes aren't written out at closing
            # transaction, if tr.addfilegenerator (via
            # dirstate.write or so) isn't invoked while
            # transaction running
            repo.dirstate.write(None)
        else:
            # discard all changes (including ones already written
            # out) in this transaction
            repo.dirstate.restorebackup(None, prefix='journal.')

            repo.invalidate(clearfilecache=True)

    tr = transaction.transaction(rp, self.svfs, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate,
                                 releasefn=releasefn)

    tr.hookargs['txnid'] = txnid
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        # Don't reference tr2 in hook() so we don't hold a reference.
        # This reduces memory consumption when there are multiple
        # transactions per lock. This can likely go away if issue5045
        # fixes the function accumulation.
        hookargs = tr2.hookargs

        def hook():
            reporef().hook('txnclose', throw=False, txnname=desc,
                           **hookargs)
        reporef()._afterlock(hook)
    tr.addfinalize('txnclose-hook', txnclosehook)
    def txnaborthook(tr2):
        """To be run if transaction is aborted
        """
        reporef().hook('txnabort', throw=False, txnname=desc,
                       **tr2.hookargs)
    tr.addabort('txnabort-hook', txnaborthook)
    # avoid eager cache invalidation. in-memory data should be identical
    # to stored data if transaction has no error.
    tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
    self._transref = weakref.ref(tr)
    return tr
1095
1095
1096 def _journalfiles(self):
1096 def _journalfiles(self):
1097 return ((self.svfs, 'journal'),
1097 return ((self.svfs, 'journal'),
1098 (self.vfs, 'journal.dirstate'),
1098 (self.vfs, 'journal.dirstate'),
1099 (self.vfs, 'journal.branch'),
1099 (self.vfs, 'journal.branch'),
1100 (self.vfs, 'journal.desc'),
1100 (self.vfs, 'journal.desc'),
1101 (self.vfs, 'journal.bookmarks'),
1101 (self.vfs, 'journal.bookmarks'),
1102 (self.svfs, 'journal.phaseroots'))
1102 (self.svfs, 'journal.phaseroots'))
1103
1103
def undofiles(self):
    """Return (vfs, undo-name) pairs for every journal file."""
    files = []
    for vfs, name in self._journalfiles():
        files.append((vfs, undoname(name)))
    return files
1106
1106
def _writejournal(self, desc):
    """Snapshot dirstate, branch, bookmarks and phase roots into
    journal.* files so an interrupted transaction can be rolled back.

    *desc* is recorded in journal.desc together with the current
    repository length (number of changesets).
    """
    self.dirstate.savebackup(None, prefix='journal.')
    self.vfs.write("journal.branch",
                   encoding.fromlocal(self.dirstate.branch()))
    self.vfs.write("journal.desc",
                   "%d\n%s\n" % (len(self), desc))
    # bookmarks/phaseroots may be absent; tryread yields '' in that case
    self.vfs.write("journal.bookmarks",
                   self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots",
                    self.svfs.tryread("phaseroots"))
1117
1117
def recover(self):
    """Roll back an interrupted transaction left on disk.

    Returns True when a journal was found and rolled back, False (with
    a warning) when there is nothing to recover.  Takes the store lock.
    """
    with self.lock():
        if self.svfs.exists("journal"):
            self.ui.status(_("rolling back interrupted transaction\n"))
            vfsmap = {'': self.svfs,
                      'plain': self.vfs,}
            transaction.rollback(self.svfs, vfsmap, "journal",
                                 self.ui.warn)
            # drop in-memory state that may now disagree with disk
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
1131
1131
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction using the on-disk 'undo' files.

    Returns the result of _rollback() (0 on success) or 1 when no
    rollback information is available.  Acquires wlock then lock, in
    that order, to avoid dead-locks.
    """
    wlock = lock = dsguard = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if self.svfs.exists("undo"):
            # guard the dirstate so a failed rollback does not leave it
            # half restored
            dsguard = cmdutil.dirstateguard(self, 'rollback')

            return self._rollback(dryrun, force, dsguard)
        else:
            self.ui.warn(_("no rollback information available\n"))
            return 1
    finally:
        release(dsguard, lock, wlock)
1146
1146
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force, dsguard):
    """Implementation of rollback(); the caller holds wlock, lock and
    *dsguard*.  Returns 0 on success.  Refuses (unless *force*) to roll
    back a commit when the working directory is not on tip, since that
    could lose data.
    """
    ui = self.ui
    try:
        # undo.desc format: "<old repo length>\n<desc>[\n<detail>]"
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        # undo.desc missing or unreadable: still roll back, anonymously
        msg = _('rolling back unknown transaction\n')
        desc = None

    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise error.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
    self.invalidate()

    # only restore dirstate/branch when a working-dir parent was
    # stripped by the rollback
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        # prevent dirstateguard from overwriting already restored one
        dsguard.close()

        self.dirstate.restorebackup(None, prefix='undo.')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        parents = tuple([p.rev() for p in self[None].parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        mergemod.mergestate.clean(self, self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1217
1217
def invalidatecaches(self):
    """Drop the tags cache, all branch caches and the volatile sets."""
    # delattr cannot be used on the repoview proxy, so drop the cached
    # attribute through the instance dictionary directly (no-op when it
    # was never computed).
    self.__dict__.pop('_tagscache', None)

    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
1226
1226
def invalidatevolatilesets(self):
    """Drop caches derived from volatile state: the per-filter revision
    cache and the obsolescence caches."""
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
1230
1230
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() that it doesn't always
    rereads the dirstate. Use dirstate.invalidate() if you want to
    explicitly read the dirstate again (i.e. restoring it to a previous
    known good state).'''
    # nothing to do when the dirstate was never loaded
    if not hasunfilteredcache(self, 'dirstate'):
        return
    for name in self.dirstate._filecache:
        try:
            delattr(self.dirstate, name)
        except AttributeError:
            # cached attribute was never materialized on this instance
            pass
    delattr(self.unfiltered(), 'dirstate')
1247
1247
def invalidate(self, clearfilecache=False):
    '''Invalidates both store and non-store parts other than dirstate

    If a transaction is running, invalidation of store is omitted,
    because discarding in-memory changes might cause inconsistency
    (e.g. incomplete fncache causes unintentional failure, but
    redundant one doesn't).

    When *clearfilecache* is true, the filecache entries themselves are
    dropped as well, not just the cached attribute values.
    '''
    unfiltered = self.unfiltered() # all file caches are stored unfiltered
    # NOTE: .keys() snapshots the keys (a list on Python 2), so deleting
    # entries inside the loop is safe here
    for k in self._filecache.keys():
        # dirstate is invalidated separately in invalidatedirstate()
        if k == 'dirstate':
            continue

        if clearfilecache:
            del self._filecache[k]
        try:
            delattr(unfiltered, k)
        except AttributeError:
            # the cached attribute was never computed on this instance
            pass
    self.invalidatecaches()
    if not self.currenttransaction():
        # TODO: Changing contents of store outside transaction
        # causes inconsistency. We should make in-memory store
        # changes detectable, and abort if changed.
        self.store.invalidatecaches()
1274
1274
def invalidateall(self):
    """Fully invalidate both store and non-store parts so subsequent
    operations reread any outside changes."""
    # extensions should hook this to invalidate their own caches
    self.invalidate()
    self.invalidatedirstate()
1281
1281
@unfilteredmethod
def _refreshfilecachestats(self, tr):
    """Re-stat cached files after a transaction so their filecache
    entries are flagged as valid again."""
    for name, entry in self._filecache.items():
        # dirstate is handled separately; skip entries that were never
        # materialized on this instance
        if name == 'dirstate' or name not in self.__dict__:
            continue
        entry.refresh()
1289
1289
def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
          inheritchecker=None, parentenvvar=None):
    """Acquire the lock file *lockname* in *vfs* and return it.

    First tries a non-blocking acquisition.  If the lock is held and
    *wait* is false, error.LockHeld propagates; otherwise a warning is
    printed and the acquisition is retried with the ui.timeout config
    value (default 600 seconds).  *releasefn*/*acquirefn*/*desc* and
    *inheritchecker* are forwarded to lockmod.lock; *parentenvvar*
    names an environment variable carrying a lock inherited from a
    parent process.
    """
    parentlock = None
    # the contents of parentenvvar are used by the underlying lock to
    # determine whether it can be inherited
    if parentenvvar is not None:
        parentlock = os.environ.get(parentenvvar)
    try:
        # timeout 0: fail immediately if held, so we can warn first
        l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                         acquirefn=acquirefn, desc=desc,
                         inheritchecker=inheritchecker,
                         parentlock=parentlock)
    except error.LockHeld as inst:
        if not wait:
            raise
        # show more details for new-style locks
        if ':' in inst.locker:
            host, pid = inst.locker.split(":", 1)
            self.ui.warn(
                _("waiting for lock on %s held by process %r "
                  "on host %r\n") % (desc, pid, host))
        else:
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
        # default to 600 seconds timeout
        l = lockmod.lock(vfs, lockname,
                         int(self.ui.config("ui", "timeout", "600")),
                         releasefn=releasefn, acquirefn=acquirefn,
                         desc=desc)
        self.ui.warn(_("got lock after %s seconds\n") % l.delay)
    return l
1321
1321
1322 def _afterlock(self, callback):
1322 def _afterlock(self, callback):
1323 """add a callback to be run when the repository is fully unlocked
1323 """add a callback to be run when the repository is fully unlocked
1324
1324
1325 The callback will be executed when the outermost lock is released
1325 The callback will be executed when the outermost lock is released
1326 (with wlock being higher level than 'lock')."""
1326 (with wlock being higher level than 'lock')."""
1327 for ref in (self._wlockref, self._lockref):
1327 for ref in (self._wlockref, self._lockref):
1328 l = ref and ref()
1328 l = ref and ref()
1329 if l and l.held:
1329 if l and l.held:
1330 l.postrelease.append(callback)
1330 l.postrelease.append(callback)
1331 break
1331 break
1332 else: # no lock have been found.
1332 else: # no lock have been found.
1333 callback()
1333 callback()
1334
1334
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    existing = self._currentlock(self._lockref)
    if existing is not None:
        # re-entrant acquisition: just bump the held lock's depth
        existing.lock()
        return existing

    newlock = self._lock(self.svfs, "lock", wait, None,
                         self.invalidate, _('repository %s') % self.origroot)
    self._lockref = weakref.ref(newlock)
    return newlock
1351
1351
1352 def _wlockchecktransaction(self):
1352 def _wlockchecktransaction(self):
1353 if self.currenttransaction() is not None:
1353 if self.currenttransaction() is not None:
1354 raise error.LockInheritanceContractViolation(
1354 raise error.LockInheritanceContractViolation(
1355 'wlock cannot be inherited in the middle of a transaction')
1355 'wlock cannot be inherited in the middle of a transaction')
1356
1356
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Use this before modifying files in .hg.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    # re-entrant case: the wlock is already held by us
    l = self._wlockref and self._wlockref()
    if l is not None and l.held:
        l.lock()
        return l

    # We do not need to check for non-waiting lock acquisition. Such
    # acquisition would not cause dead-lock as they would just fail.
    if wait and (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is not None:
            self.ui.develwarn('"wlock" acquired after "lock"')

    def unlock():
        # on release: discard in-memory dirstate if a parent change is
        # still pending, otherwise flush it to disk
        if self.dirstate.pendingparentchange():
            self.dirstate.invalidate()
        else:
            self.dirstate.write(None)

        # mark the freshly written dirstate file as valid in the cache
        self._filecache['dirstate'].refresh()

    l = self._lock(self.vfs, "wlock", wait, unlock,
                   self.invalidatedirstate, _('working directory of %s') %
                   self.origroot,
                   inheritchecker=self._wlockchecktransaction,
                   parentenvvar='HG_WLOCK_LOCKER')
    self._wlockref = weakref.ref(l)
    return l
1392
1392
1393 def _currentlock(self, lockref):
1393 def _currentlock(self, lockref):
1394 """Returns the lock if it's held, or None if it's not."""
1394 """Returns the lock if it's held, or None if it's not."""
1395 if lockref is None:
1395 if lockref is None:
1396 return None
1396 return None
1397 l = lockref()
1397 l = lockref()
1398 if l is None or not l.held:
1398 if l is None or not l.held:
1399 return None
1399 return None
1400 return l
1400 return l
1401
1401
def currentwlock(self):
    """Return the wlock when it is currently held, else None."""
    return self._currentlock(self._wlockref)
1405
1405
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx:        file context to commit
    manifest1/2: manifests of the first and second parents, used to
                 look up the file's parent filenodes and copy sources
    linkrev:     changelog revision the new filelog entry will link to
    tr:          the running transaction
    changelist:  list of file names; fname is appended when the file
                 (or its flags) actually changed
    Returns the filenode of the (possibly reused) file revision.
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    if isinstance(fctx, context.filectx):
        # the context already points at a stored filelog node; reuse it
        # when it matches a parent, instead of adding a new revision
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            if manifest1.flags(fname) != fctx.flags():
                changelist.append(fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3 rev1 changes file foo
        # \ / rev2 renames foo to bar and changes it
        # \- 2 -/ rev3 should have bar with all changes and
        # should record that bar descends from
        # bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3 rev4 reverts the content change from rev2
        # \ / merging rev3 and rev4 should use bar@rev2
        # \- 2 --- 4 as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
1493
1493
1494 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1494 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1495 """check for commit arguments that aren't commitable"""
1495 """check for commit arguments that aren't commitable"""
1496 if match.isexact() or match.prefix():
1496 if match.isexact() or match.prefix():
1497 matched = set(status.modified + status.added + status.removed)
1497 matched = set(status.modified + status.added + status.removed)
1498
1498
1499 for f in match.files():
1499 for f in match.files():
1500 f = self.dirstate.normalize(f)
1500 f = self.dirstate.normalize(f)
1501 if f == '.' or f in matched or f in wctx.substate:
1501 if f == '.' or f in matched or f in wctx.substate:
1502 continue
1502 continue
1503 if f in status.deleted:
1503 if f in status.deleted:
1504 fail(f, _('file not found!'))
1504 fail(f, _('file not found!'))
1505 if f in vdirs: # visited directory
1505 if f in vdirs: # visited directory
1506 d = f + '/'
1506 d = f + '/'
1507 for mf in matched:
1507 for mf in matched:
1508 if mf.startswith(d):
1508 if mf.startswith(d):
1509 break
1509 break
1510 else:
1510 else:
1511 fail(f, _("no match under directory!"))
1511 fail(f, _("no match under directory!"))
1512 elif f not in self.dirstate:
1512 elif f not in self.dirstate:
1513 fail(f, _("file not tracked!"))
1513 fail(f, _("file not tracked!"))
1514
1514
1515 @unfilteredmethod
1515 @unfilteredmethod
1516 def commit(self, text="", user=None, date=None, match=None, force=False,
1516 def commit(self, text="", user=None, date=None, match=None, force=False,
1517 editor=False, extra=None):
1517 editor=False, extra=None):
1518 """Add a new revision to current repository.
1518 """Add a new revision to current repository.
1519
1519
1520 Revision information is gathered from the working directory,
1520 Revision information is gathered from the working directory,
1521 match can be used to filter the committed files. If editor is
1521 match can be used to filter the committed files. If editor is
1522 supplied, it is called to get a commit message.
1522 supplied, it is called to get a commit message.
1523 """
1523 """
1524 if extra is None:
1524 if extra is None:
1525 extra = {}
1525 extra = {}
1526
1526
1527 def fail(f, msg):
1527 def fail(f, msg):
1528 raise error.Abort('%s: %s' % (f, msg))
1528 raise error.Abort('%s: %s' % (f, msg))
1529
1529
1530 if not match:
1530 if not match:
1531 match = matchmod.always(self.root, '')
1531 match = matchmod.always(self.root, '')
1532
1532
1533 if not force:
1533 if not force:
1534 vdirs = []
1534 vdirs = []
1535 match.explicitdir = vdirs.append
1535 match.explicitdir = vdirs.append
1536 match.bad = fail
1536 match.bad = fail
1537
1537
1538 wlock = lock = tr = None
1538 wlock = lock = tr = None
1539 try:
1539 try:
1540 wlock = self.wlock()
1540 wlock = self.wlock()
1541 lock = self.lock() # for recent changelog (see issue4368)
1541 lock = self.lock() # for recent changelog (see issue4368)
1542
1542
1543 wctx = self[None]
1543 wctx = self[None]
1544 merge = len(wctx.parents()) > 1
1544 merge = len(wctx.parents()) > 1
1545
1545
1546 if not force and merge and match.ispartial():
1546 if not force and merge and match.ispartial():
1547 raise error.Abort(_('cannot partially commit a merge '
1547 raise error.Abort(_('cannot partially commit a merge '
1548 '(do not specify files or patterns)'))
1548 '(do not specify files or patterns)'))
1549
1549
1550 status = self.status(match=match, clean=force)
1550 status = self.status(match=match, clean=force)
1551 if force:
1551 if force:
1552 status.modified.extend(status.clean) # mq may commit clean files
1552 status.modified.extend(status.clean) # mq may commit clean files
1553
1553
1554 # check subrepos
1554 # check subrepos
1555 subs = []
1555 subs = []
1556 commitsubs = set()
1556 commitsubs = set()
1557 newstate = wctx.substate.copy()
1557 newstate = wctx.substate.copy()
1558 # only manage subrepos and .hgsubstate if .hgsub is present
1558 # only manage subrepos and .hgsubstate if .hgsub is present
1559 if '.hgsub' in wctx:
1559 if '.hgsub' in wctx:
1560 # we'll decide whether to track this ourselves, thanks
1560 # we'll decide whether to track this ourselves, thanks
1561 for c in status.modified, status.added, status.removed:
1561 for c in status.modified, status.added, status.removed:
1562 if '.hgsubstate' in c:
1562 if '.hgsubstate' in c:
1563 c.remove('.hgsubstate')
1563 c.remove('.hgsubstate')
1564
1564
1565 # compare current state to last committed state
1565 # compare current state to last committed state
1566 # build new substate based on last committed state
1566 # build new substate based on last committed state
1567 oldstate = wctx.p1().substate
1567 oldstate = wctx.p1().substate
1568 for s in sorted(newstate.keys()):
1568 for s in sorted(newstate.keys()):
1569 if not match(s):
1569 if not match(s):
1570 # ignore working copy, use old state if present
1570 # ignore working copy, use old state if present
1571 if s in oldstate:
1571 if s in oldstate:
1572 newstate[s] = oldstate[s]
1572 newstate[s] = oldstate[s]
1573 continue
1573 continue
1574 if not force:
1574 if not force:
1575 raise error.Abort(
1575 raise error.Abort(
1576 _("commit with new subrepo %s excluded") % s)
1576 _("commit with new subrepo %s excluded") % s)
1577 dirtyreason = wctx.sub(s).dirtyreason(True)
1577 dirtyreason = wctx.sub(s).dirtyreason(True)
1578 if dirtyreason:
1578 if dirtyreason:
1579 if not self.ui.configbool('ui', 'commitsubrepos'):
1579 if not self.ui.configbool('ui', 'commitsubrepos'):
1580 raise error.Abort(dirtyreason,
1580 raise error.Abort(dirtyreason,
1581 hint=_("use --subrepos for recursive commit"))
1581 hint=_("use --subrepos for recursive commit"))
1582 subs.append(s)
1582 subs.append(s)
1583 commitsubs.add(s)
1583 commitsubs.add(s)
1584 else:
1584 else:
1585 bs = wctx.sub(s).basestate()
1585 bs = wctx.sub(s).basestate()
1586 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1586 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1587 if oldstate.get(s, (None, None, None))[1] != bs:
1587 if oldstate.get(s, (None, None, None))[1] != bs:
1588 subs.append(s)
1588 subs.append(s)
1589
1589
1590 # check for removed subrepos
1590 # check for removed subrepos
1591 for p in wctx.parents():
1591 for p in wctx.parents():
1592 r = [s for s in p.substate if s not in newstate]
1592 r = [s for s in p.substate if s not in newstate]
1593 subs += [s for s in r if match(s)]
1593 subs += [s for s in r if match(s)]
1594 if subs:
1594 if subs:
1595 if (not match('.hgsub') and
1595 if (not match('.hgsub') and
1596 '.hgsub' in (wctx.modified() + wctx.added())):
1596 '.hgsub' in (wctx.modified() + wctx.added())):
1597 raise error.Abort(
1597 raise error.Abort(
1598 _("can't commit subrepos without .hgsub"))
1598 _("can't commit subrepos without .hgsub"))
1599 status.modified.insert(0, '.hgsubstate')
1599 status.modified.insert(0, '.hgsubstate')
1600
1600
1601 elif '.hgsub' in status.removed:
1601 elif '.hgsub' in status.removed:
1602 # clean up .hgsubstate when .hgsub is removed
1602 # clean up .hgsubstate when .hgsub is removed
1603 if ('.hgsubstate' in wctx and
1603 if ('.hgsubstate' in wctx and
1604 '.hgsubstate' not in (status.modified + status.added +
1604 '.hgsubstate' not in (status.modified + status.added +
1605 status.removed)):
1605 status.removed)):
1606 status.removed.insert(0, '.hgsubstate')
1606 status.removed.insert(0, '.hgsubstate')
1607
1607
1608 # make sure all explicit patterns are matched
1608 # make sure all explicit patterns are matched
1609 if not force:
1609 if not force:
1610 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1610 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1611
1611
1612 cctx = context.workingcommitctx(self, status,
1612 cctx = context.workingcommitctx(self, status,
1613 text, user, date, extra)
1613 text, user, date, extra)
1614
1614
1615 # internal config: ui.allowemptycommit
1615 # internal config: ui.allowemptycommit
1616 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1616 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1617 or extra.get('close') or merge or cctx.files()
1617 or extra.get('close') or merge or cctx.files()
1618 or self.ui.configbool('ui', 'allowemptycommit'))
1618 or self.ui.configbool('ui', 'allowemptycommit'))
1619 if not allowemptycommit:
1619 if not allowemptycommit:
1620 return None
1620 return None
1621
1621
1622 if merge and cctx.deleted():
1622 if merge and cctx.deleted():
1623 raise error.Abort(_("cannot commit merge with missing files"))
1623 raise error.Abort(_("cannot commit merge with missing files"))
1624
1624
1625 ms = mergemod.mergestate.read(self)
1625 ms = mergemod.mergestate.read(self)
1626
1626
1627 if list(ms.unresolved()):
1627 if list(ms.unresolved()):
1628 raise error.Abort(_('unresolved merge conflicts '
1628 raise error.Abort(_('unresolved merge conflicts '
1629 '(see "hg help resolve")'))
1629 '(see "hg help resolve")'))
1630 if ms.mdstate() != 's' or list(ms.driverresolved()):
1630 if ms.mdstate() != 's' or list(ms.driverresolved()):
1631 raise error.Abort(_('driver-resolved merge conflicts'),
1631 raise error.Abort(_('driver-resolved merge conflicts'),
1632 hint=_('run "hg resolve --all" to resolve'))
1632 hint=_('run "hg resolve --all" to resolve'))
1633
1633
1634 if editor:
1634 if editor:
1635 cctx._text = editor(self, cctx, subs)
1635 cctx._text = editor(self, cctx, subs)
1636 edited = (text != cctx._text)
1636 edited = (text != cctx._text)
1637
1637
1638 # Save commit message in case this transaction gets rolled back
1638 # Save commit message in case this transaction gets rolled back
1639 # (e.g. by a pretxncommit hook). Leave the content alone on
1639 # (e.g. by a pretxncommit hook). Leave the content alone on
1640 # the assumption that the user will use the same editor again.
1640 # the assumption that the user will use the same editor again.
1641 msgfn = self.savecommitmessage(cctx._text)
1641 msgfn = self.savecommitmessage(cctx._text)
1642
1642
1643 # commit subs and write new state
1643 # commit subs and write new state
1644 if subs:
1644 if subs:
1645 for s in sorted(commitsubs):
1645 for s in sorted(commitsubs):
1646 sub = wctx.sub(s)
1646 sub = wctx.sub(s)
1647 self.ui.status(_('committing subrepository %s\n') %
1647 self.ui.status(_('committing subrepository %s\n') %
1648 subrepo.subrelpath(sub))
1648 subrepo.subrelpath(sub))
1649 sr = sub.commit(cctx._text, user, date)
1649 sr = sub.commit(cctx._text, user, date)
1650 newstate[s] = (newstate[s][0], sr)
1650 newstate[s] = (newstate[s][0], sr)
1651 subrepo.writestate(self, newstate)
1651 subrepo.writestate(self, newstate)
1652
1652
1653 p1, p2 = self.dirstate.parents()
1653 p1, p2 = self.dirstate.parents()
1654 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1654 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1655 try:
1655 try:
1656 self.hook("precommit", throw=True, parent1=hookp1,
1656 self.hook("precommit", throw=True, parent1=hookp1,
1657 parent2=hookp2)
1657 parent2=hookp2)
1658 tr = self.transaction('commit')
1658 tr = self.transaction('commit')
1659 ret = self.commitctx(cctx, True)
1659 ret = self.commitctx(cctx, True)
1660 except: # re-raises
1660 except: # re-raises
1661 if edited:
1661 if edited:
1662 self.ui.write(
1662 self.ui.write(
1663 _('note: commit message saved in %s\n') % msgfn)
1663 _('note: commit message saved in %s\n') % msgfn)
1664 raise
1664 raise
1665 # update bookmarks, dirstate and mergestate
1665 # update bookmarks, dirstate and mergestate
1666 bookmarks.update(self, [p1, p2], ret)
1666 bookmarks.update(self, [p1, p2], ret)
1667 cctx.markcommitted(ret)
1667 cctx.markcommitted(ret)
1668 ms.reset()
1668 ms.reset()
1669 tr.close()
1669 tr.close()
1670
1670
1671 finally:
1671 finally:
1672 lockmod.release(tr, lock, wlock)
1672 lockmod.release(tr, lock, wlock)
1673
1673
1674 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1674 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1675 # hack for command that use a temporary commit (eg: histedit)
1675 # hack for command that use a temporary commit (eg: histedit)
1676 # temporary commit got stripped before hook release
1676 # temporary commit got stripped before hook release
1677 if self.changelog.hasnode(ret):
1677 if self.changelog.hasnode(ret):
1678 self.hook("commit", node=node, parent1=parent1,
1678 self.hook("commit", node=node, parent1=parent1,
1679 parent2=parent2)
1679 parent2=parent2)
1680 self._afterlock(commithook)
1680 self._afterlock(commithook)
1681 return ret
1681 return ret
1682
1682
1683 @unfilteredmethod
1683 @unfilteredmethod
1684 def commitctx(self, ctx, error=False):
1684 def commitctx(self, ctx, error=False):
1685 """Add a new revision to current repository.
1685 """Add a new revision to current repository.
1686 Revision information is passed via the context argument.
1686 Revision information is passed via the context argument.
1687 """
1687 """
1688
1688
1689 tr = None
1689 tr = None
1690 p1, p2 = ctx.p1(), ctx.p2()
1690 p1, p2 = ctx.p1(), ctx.p2()
1691 user = ctx.user()
1691 user = ctx.user()
1692
1692
1693 lock = self.lock()
1693 lock = self.lock()
1694 try:
1694 try:
1695 tr = self.transaction("commit")
1695 tr = self.transaction("commit")
1696 trp = weakref.proxy(tr)
1696 trp = weakref.proxy(tr)
1697
1697
1698 if ctx.files():
1698 if ctx.files():
1699 m1 = p1.manifest()
1699 m1 = p1.manifest()
1700 m2 = p2.manifest()
1700 m2 = p2.manifest()
1701 m = m1.copy()
1701 m = m1.copy()
1702
1702
1703 # check in files
1703 # check in files
1704 added = []
1704 added = []
1705 changed = []
1705 changed = []
1706 removed = list(ctx.removed())
1706 removed = list(ctx.removed())
1707 linkrev = len(self)
1707 linkrev = len(self)
1708 self.ui.note(_("committing files:\n"))
1708 self.ui.note(_("committing files:\n"))
1709 for f in sorted(ctx.modified() + ctx.added()):
1709 for f in sorted(ctx.modified() + ctx.added()):
1710 self.ui.note(f + "\n")
1710 self.ui.note(f + "\n")
1711 try:
1711 try:
1712 fctx = ctx[f]
1712 fctx = ctx[f]
1713 if fctx is None:
1713 if fctx is None:
1714 removed.append(f)
1714 removed.append(f)
1715 else:
1715 else:
1716 added.append(f)
1716 added.append(f)
1717 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1717 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1718 trp, changed)
1718 trp, changed)
1719 m.setflag(f, fctx.flags())
1719 m.setflag(f, fctx.flags())
1720 except OSError as inst:
1720 except OSError as inst:
1721 self.ui.warn(_("trouble committing %s!\n") % f)
1721 self.ui.warn(_("trouble committing %s!\n") % f)
1722 raise
1722 raise
1723 except IOError as inst:
1723 except IOError as inst:
1724 errcode = getattr(inst, 'errno', errno.ENOENT)
1724 errcode = getattr(inst, 'errno', errno.ENOENT)
1725 if error or errcode and errcode != errno.ENOENT:
1725 if error or errcode and errcode != errno.ENOENT:
1726 self.ui.warn(_("trouble committing %s!\n") % f)
1726 self.ui.warn(_("trouble committing %s!\n") % f)
1727 raise
1727 raise
1728
1728
1729 # update manifest
1729 # update manifest
1730 self.ui.note(_("committing manifest\n"))
1730 self.ui.note(_("committing manifest\n"))
1731 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1731 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1732 drop = [f for f in removed if f in m]
1732 drop = [f for f in removed if f in m]
1733 for f in drop:
1733 for f in drop:
1734 del m[f]
1734 del m[f]
1735 mn = self.manifest.add(m, trp, linkrev,
1735 mn = self.manifestlog.add(m, trp, linkrev,
1736 p1.manifestnode(), p2.manifestnode(),
1736 p1.manifestnode(), p2.manifestnode(),
1737 added, drop)
1737 added, drop)
1738 files = changed + removed
1738 files = changed + removed
1739 else:
1739 else:
1740 mn = p1.manifestnode()
1740 mn = p1.manifestnode()
1741 files = []
1741 files = []
1742
1742
1743 # update changelog
1743 # update changelog
1744 self.ui.note(_("committing changelog\n"))
1744 self.ui.note(_("committing changelog\n"))
1745 self.changelog.delayupdate(tr)
1745 self.changelog.delayupdate(tr)
1746 n = self.changelog.add(mn, files, ctx.description(),
1746 n = self.changelog.add(mn, files, ctx.description(),
1747 trp, p1.node(), p2.node(),
1747 trp, p1.node(), p2.node(),
1748 user, ctx.date(), ctx.extra().copy())
1748 user, ctx.date(), ctx.extra().copy())
1749 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1749 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1750 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1750 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1751 parent2=xp2)
1751 parent2=xp2)
1752 # set the new commit is proper phase
1752 # set the new commit is proper phase
1753 targetphase = subrepo.newcommitphase(self.ui, ctx)
1753 targetphase = subrepo.newcommitphase(self.ui, ctx)
1754 if targetphase:
1754 if targetphase:
1755 # retract boundary do not alter parent changeset.
1755 # retract boundary do not alter parent changeset.
1756 # if a parent have higher the resulting phase will
1756 # if a parent have higher the resulting phase will
1757 # be compliant anyway
1757 # be compliant anyway
1758 #
1758 #
1759 # if minimal phase was 0 we don't need to retract anything
1759 # if minimal phase was 0 we don't need to retract anything
1760 phases.retractboundary(self, tr, targetphase, [n])
1760 phases.retractboundary(self, tr, targetphase, [n])
1761 tr.close()
1761 tr.close()
1762 branchmap.updatecache(self.filtered('served'))
1762 branchmap.updatecache(self.filtered('served'))
1763 return n
1763 return n
1764 finally:
1764 finally:
1765 if tr:
1765 if tr:
1766 tr.release()
1766 tr.release()
1767 lock.release()
1767 lock.release()
1768
1768
1769 @unfilteredmethod
1769 @unfilteredmethod
1770 def destroying(self):
1770 def destroying(self):
1771 '''Inform the repository that nodes are about to be destroyed.
1771 '''Inform the repository that nodes are about to be destroyed.
1772 Intended for use by strip and rollback, so there's a common
1772 Intended for use by strip and rollback, so there's a common
1773 place for anything that has to be done before destroying history.
1773 place for anything that has to be done before destroying history.
1774
1774
1775 This is mostly useful for saving state that is in memory and waiting
1775 This is mostly useful for saving state that is in memory and waiting
1776 to be flushed when the current lock is released. Because a call to
1776 to be flushed when the current lock is released. Because a call to
1777 destroyed is imminent, the repo will be invalidated causing those
1777 destroyed is imminent, the repo will be invalidated causing those
1778 changes to stay in memory (waiting for the next unlock), or vanish
1778 changes to stay in memory (waiting for the next unlock), or vanish
1779 completely.
1779 completely.
1780 '''
1780 '''
1781 # When using the same lock to commit and strip, the phasecache is left
1781 # When using the same lock to commit and strip, the phasecache is left
1782 # dirty after committing. Then when we strip, the repo is invalidated,
1782 # dirty after committing. Then when we strip, the repo is invalidated,
1783 # causing those changes to disappear.
1783 # causing those changes to disappear.
1784 if '_phasecache' in vars(self):
1784 if '_phasecache' in vars(self):
1785 self._phasecache.write()
1785 self._phasecache.write()
1786
1786
1787 @unfilteredmethod
1787 @unfilteredmethod
1788 def destroyed(self):
1788 def destroyed(self):
1789 '''Inform the repository that nodes have been destroyed.
1789 '''Inform the repository that nodes have been destroyed.
1790 Intended for use by strip and rollback, so there's a common
1790 Intended for use by strip and rollback, so there's a common
1791 place for anything that has to be done after destroying history.
1791 place for anything that has to be done after destroying history.
1792 '''
1792 '''
1793 # When one tries to:
1793 # When one tries to:
1794 # 1) destroy nodes thus calling this method (e.g. strip)
1794 # 1) destroy nodes thus calling this method (e.g. strip)
1795 # 2) use phasecache somewhere (e.g. commit)
1795 # 2) use phasecache somewhere (e.g. commit)
1796 #
1796 #
1797 # then 2) will fail because the phasecache contains nodes that were
1797 # then 2) will fail because the phasecache contains nodes that were
1798 # removed. We can either remove phasecache from the filecache,
1798 # removed. We can either remove phasecache from the filecache,
1799 # causing it to reload next time it is accessed, or simply filter
1799 # causing it to reload next time it is accessed, or simply filter
1800 # the removed nodes now and write the updated cache.
1800 # the removed nodes now and write the updated cache.
1801 self._phasecache.filterunknown(self)
1801 self._phasecache.filterunknown(self)
1802 self._phasecache.write()
1802 self._phasecache.write()
1803
1803
1804 # update the 'served' branch cache to help read only server process
1804 # update the 'served' branch cache to help read only server process
1805 # Thanks to branchcache collaboration this is done from the nearest
1805 # Thanks to branchcache collaboration this is done from the nearest
1806 # filtered subset and it is expected to be fast.
1806 # filtered subset and it is expected to be fast.
1807 branchmap.updatecache(self.filtered('served'))
1807 branchmap.updatecache(self.filtered('served'))
1808
1808
1809 # Ensure the persistent tag cache is updated. Doing it now
1809 # Ensure the persistent tag cache is updated. Doing it now
1810 # means that the tag cache only has to worry about destroyed
1810 # means that the tag cache only has to worry about destroyed
1811 # heads immediately after a strip/rollback. That in turn
1811 # heads immediately after a strip/rollback. That in turn
1812 # guarantees that "cachetip == currenttip" (comparing both rev
1812 # guarantees that "cachetip == currenttip" (comparing both rev
1813 # and node) always means no nodes have been added or destroyed.
1813 # and node) always means no nodes have been added or destroyed.
1814
1814
1815 # XXX this is suboptimal when qrefresh'ing: we strip the current
1815 # XXX this is suboptimal when qrefresh'ing: we strip the current
1816 # head, refresh the tag cache, then immediately add a new head.
1816 # head, refresh the tag cache, then immediately add a new head.
1817 # But I think doing it this way is necessary for the "instant
1817 # But I think doing it this way is necessary for the "instant
1818 # tag cache retrieval" case to work.
1818 # tag cache retrieval" case to work.
1819 self.invalidate()
1819 self.invalidate()
1820
1820
1821 def walk(self, match, node=None):
1821 def walk(self, match, node=None):
1822 '''
1822 '''
1823 walk recursively through the directory tree or a given
1823 walk recursively through the directory tree or a given
1824 changeset, finding all files matched by the match
1824 changeset, finding all files matched by the match
1825 function
1825 function
1826 '''
1826 '''
1827 return self[node].walk(match)
1827 return self[node].walk(match)
1828
1828
1829 def status(self, node1='.', node2=None, match=None,
1829 def status(self, node1='.', node2=None, match=None,
1830 ignored=False, clean=False, unknown=False,
1830 ignored=False, clean=False, unknown=False,
1831 listsubrepos=False):
1831 listsubrepos=False):
1832 '''a convenience method that calls node1.status(node2)'''
1832 '''a convenience method that calls node1.status(node2)'''
1833 return self[node1].status(node2, match, ignored, clean, unknown,
1833 return self[node1].status(node2, match, ignored, clean, unknown,
1834 listsubrepos)
1834 listsubrepos)
1835
1835
1836 def heads(self, start=None):
1836 def heads(self, start=None):
1837 heads = self.changelog.heads(start)
1837 heads = self.changelog.heads(start)
1838 # sort the output in rev descending order
1838 # sort the output in rev descending order
1839 return sorted(heads, key=self.changelog.rev, reverse=True)
1839 return sorted(heads, key=self.changelog.rev, reverse=True)
1840
1840
1841 def branchheads(self, branch=None, start=None, closed=False):
1841 def branchheads(self, branch=None, start=None, closed=False):
1842 '''return a (possibly filtered) list of heads for the given branch
1842 '''return a (possibly filtered) list of heads for the given branch
1843
1843
1844 Heads are returned in topological order, from newest to oldest.
1844 Heads are returned in topological order, from newest to oldest.
1845 If branch is None, use the dirstate branch.
1845 If branch is None, use the dirstate branch.
1846 If start is not None, return only heads reachable from start.
1846 If start is not None, return only heads reachable from start.
1847 If closed is True, return heads that are marked as closed as well.
1847 If closed is True, return heads that are marked as closed as well.
1848 '''
1848 '''
1849 if branch is None:
1849 if branch is None:
1850 branch = self[None].branch()
1850 branch = self[None].branch()
1851 branches = self.branchmap()
1851 branches = self.branchmap()
1852 if branch not in branches:
1852 if branch not in branches:
1853 return []
1853 return []
1854 # the cache returns heads ordered lowest to highest
1854 # the cache returns heads ordered lowest to highest
1855 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1855 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1856 if start is not None:
1856 if start is not None:
1857 # filter out the heads that cannot be reached from startrev
1857 # filter out the heads that cannot be reached from startrev
1858 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1858 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1859 bheads = [h for h in bheads if h in fbheads]
1859 bheads = [h for h in bheads if h in fbheads]
1860 return bheads
1860 return bheads
1861
1861
1862 def branches(self, nodes):
1862 def branches(self, nodes):
1863 if not nodes:
1863 if not nodes:
1864 nodes = [self.changelog.tip()]
1864 nodes = [self.changelog.tip()]
1865 b = []
1865 b = []
1866 for n in nodes:
1866 for n in nodes:
1867 t = n
1867 t = n
1868 while True:
1868 while True:
1869 p = self.changelog.parents(n)
1869 p = self.changelog.parents(n)
1870 if p[1] != nullid or p[0] == nullid:
1870 if p[1] != nullid or p[0] == nullid:
1871 b.append((t, n, p[0], p[1]))
1871 b.append((t, n, p[0], p[1]))
1872 break
1872 break
1873 n = p[0]
1873 n = p[0]
1874 return b
1874 return b
1875
1875
1876 def between(self, pairs):
1876 def between(self, pairs):
1877 r = []
1877 r = []
1878
1878
1879 for top, bottom in pairs:
1879 for top, bottom in pairs:
1880 n, l, i = top, [], 0
1880 n, l, i = top, [], 0
1881 f = 1
1881 f = 1
1882
1882
1883 while n != bottom and n != nullid:
1883 while n != bottom and n != nullid:
1884 p = self.changelog.parents(n)[0]
1884 p = self.changelog.parents(n)[0]
1885 if i == f:
1885 if i == f:
1886 l.append(n)
1886 l.append(n)
1887 f = f * 2
1887 f = f * 2
1888 n = p
1888 n = p
1889 i += 1
1889 i += 1
1890
1890
1891 r.append(l)
1891 r.append(l)
1892
1892
1893 return r
1893 return r
1894
1894
1895 def checkpush(self, pushop):
1895 def checkpush(self, pushop):
1896 """Extensions can override this function if additional checks have
1896 """Extensions can override this function if additional checks have
1897 to be performed before pushing, or call it if they override push
1897 to be performed before pushing, or call it if they override push
1898 command.
1898 command.
1899 """
1899 """
1900 pass
1900 pass
1901
1901
1902 @unfilteredpropertycache
1902 @unfilteredpropertycache
1903 def prepushoutgoinghooks(self):
1903 def prepushoutgoinghooks(self):
1904 """Return util.hooks consists of a pushop with repo, remote, outgoing
1904 """Return util.hooks consists of a pushop with repo, remote, outgoing
1905 methods, which are called before pushing changesets.
1905 methods, which are called before pushing changesets.
1906 """
1906 """
1907 return util.hooks()
1907 return util.hooks()
1908
1908
1909 def pushkey(self, namespace, key, old, new):
1909 def pushkey(self, namespace, key, old, new):
1910 try:
1910 try:
1911 tr = self.currenttransaction()
1911 tr = self.currenttransaction()
1912 hookargs = {}
1912 hookargs = {}
1913 if tr is not None:
1913 if tr is not None:
1914 hookargs.update(tr.hookargs)
1914 hookargs.update(tr.hookargs)
1915 hookargs['namespace'] = namespace
1915 hookargs['namespace'] = namespace
1916 hookargs['key'] = key
1916 hookargs['key'] = key
1917 hookargs['old'] = old
1917 hookargs['old'] = old
1918 hookargs['new'] = new
1918 hookargs['new'] = new
1919 self.hook('prepushkey', throw=True, **hookargs)
1919 self.hook('prepushkey', throw=True, **hookargs)
1920 except error.HookAbort as exc:
1920 except error.HookAbort as exc:
1921 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1921 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1922 if exc.hint:
1922 if exc.hint:
1923 self.ui.write_err(_("(%s)\n") % exc.hint)
1923 self.ui.write_err(_("(%s)\n") % exc.hint)
1924 return False
1924 return False
1925 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1925 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1926 ret = pushkey.push(self, namespace, key, old, new)
1926 ret = pushkey.push(self, namespace, key, old, new)
1927 def runhook():
1927 def runhook():
1928 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1928 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1929 ret=ret)
1929 ret=ret)
1930 self._afterlock(runhook)
1930 self._afterlock(runhook)
1931 return ret
1931 return ret
1932
1932
1933 def listkeys(self, namespace):
1933 def listkeys(self, namespace):
1934 self.hook('prelistkeys', throw=True, namespace=namespace)
1934 self.hook('prelistkeys', throw=True, namespace=namespace)
1935 self.ui.debug('listing keys for "%s"\n' % namespace)
1935 self.ui.debug('listing keys for "%s"\n' % namespace)
1936 values = pushkey.list(self, namespace)
1936 values = pushkey.list(self, namespace)
1937 self.hook('listkeys', namespace=namespace, values=values)
1937 self.hook('listkeys', namespace=namespace, values=values)
1938 return values
1938 return values
1939
1939
1940 def debugwireargs(self, one, two, three=None, four=None, five=None):
1940 def debugwireargs(self, one, two, three=None, four=None, five=None):
1941 '''used to test argument passing over the wire'''
1941 '''used to test argument passing over the wire'''
1942 return "%s %s %s %s %s" % (one, two, three, four, five)
1942 return "%s %s %s %s %s" % (one, two, three, four, five)
1943
1943
1944 def savecommitmessage(self, text):
1944 def savecommitmessage(self, text):
1945 fp = self.vfs('last-message.txt', 'wb')
1945 fp = self.vfs('last-message.txt', 'wb')
1946 try:
1946 try:
1947 fp.write(text)
1947 fp.write(text)
1948 finally:
1948 finally:
1949 fp.close()
1949 fp.close()
1950 return self.pathto(fp.name[len(self.root) + 1:])
1950 return self.pathto(fp.name[len(self.root) + 1:])
1951
1951
1952 # used to avoid circular references so destructors work
1952 # used to avoid circular references so destructors work
1953 def aftertrans(files):
1953 def aftertrans(files):
1954 renamefiles = [tuple(t) for t in files]
1954 renamefiles = [tuple(t) for t in files]
1955 def a():
1955 def a():
1956 for vfs, src, dest in renamefiles:
1956 for vfs, src, dest in renamefiles:
1957 try:
1957 try:
1958 vfs.rename(src, dest)
1958 vfs.rename(src, dest)
1959 except OSError: # journal file does not yet exist
1959 except OSError: # journal file does not yet exist
1960 pass
1960 pass
1961 return a
1961 return a
1962
1962
1963 def undoname(fn):
1963 def undoname(fn):
1964 base, name = os.path.split(fn)
1964 base, name = os.path.split(fn)
1965 assert name.startswith('journal')
1965 assert name.startswith('journal')
1966 return os.path.join(base, name.replace('journal', 'undo', 1))
1966 return os.path.join(base, name.replace('journal', 'undo', 1))
1967
1967
1968 def instance(ui, path, create):
1968 def instance(ui, path, create):
1969 return localrepository(ui, util.urllocalpath(path), create)
1969 return localrepository(ui, util.urllocalpath(path), create)
1970
1970
1971 def islocal(path):
1971 def islocal(path):
1972 return True
1972 return True
1973
1973
1974 def newreporequirements(repo):
1974 def newreporequirements(repo):
1975 """Determine the set of requirements for a new local repository.
1975 """Determine the set of requirements for a new local repository.
1976
1976
1977 Extensions can wrap this function to specify custom requirements for
1977 Extensions can wrap this function to specify custom requirements for
1978 new repositories.
1978 new repositories.
1979 """
1979 """
1980 ui = repo.ui
1980 ui = repo.ui
1981 requirements = set(['revlogv1'])
1981 requirements = set(['revlogv1'])
1982 if ui.configbool('format', 'usestore', True):
1982 if ui.configbool('format', 'usestore', True):
1983 requirements.add('store')
1983 requirements.add('store')
1984 if ui.configbool('format', 'usefncache', True):
1984 if ui.configbool('format', 'usefncache', True):
1985 requirements.add('fncache')
1985 requirements.add('fncache')
1986 if ui.configbool('format', 'dotencode', True):
1986 if ui.configbool('format', 'dotencode', True):
1987 requirements.add('dotencode')
1987 requirements.add('dotencode')
1988
1988
1989 if scmutil.gdinitconfig(ui):
1989 if scmutil.gdinitconfig(ui):
1990 requirements.add('generaldelta')
1990 requirements.add('generaldelta')
1991 if ui.configbool('experimental', 'treemanifest', False):
1991 if ui.configbool('experimental', 'treemanifest', False):
1992 requirements.add('treemanifest')
1992 requirements.add('treemanifest')
1993 if ui.configbool('experimental', 'manifestv2', False):
1993 if ui.configbool('experimental', 'manifestv2', False):
1994 requirements.add('manifestv2')
1994 requirements.add('manifestv2')
1995
1995
1996 return requirements
1996 return requirements
@@ -1,1295 +1,1298 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import array
10 import array
11 import heapq
11 import heapq
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 mdiff,
18 mdiff,
19 parsers,
19 parsers,
20 revlog,
20 revlog,
21 util,
21 util,
22 )
22 )
23
23
24 propertycache = util.propertycache
24 propertycache = util.propertycache
25
25
def _parsev1(data):
    """Yield (path, node, flags) tuples from a v1 manifest text."""
    # The precondition checks below deliberately mirror the C parser so
    # that the pure-Python version fails in exactly the same situations,
    # preventing surprise breakage for anyone developing against it.
    if data and not data.endswith('\n'):
        raise ValueError('Manifest did not end in a newline.')
    prevline = None
    for line in data.splitlines():
        if prevline is not None and prevline > line:
            raise ValueError('Manifest lines not in sorted order.')
        prevline = line
        path, hashflags = line.split('\0')
        if len(hashflags) > 40:
            # 40 hex digits of sha1, then a flags suffix
            yield path, revlog.bin(hashflags[:40]), hashflags[40:]
        else:
            yield path, revlog.bin(hashflags), ''
44
44
45 def _parsev2(data):
45 def _parsev2(data):
46 metadataend = data.find('\n')
46 metadataend = data.find('\n')
47 # Just ignore metadata for now
47 # Just ignore metadata for now
48 pos = metadataend + 1
48 pos = metadataend + 1
49 prevf = ''
49 prevf = ''
50 while pos < len(data):
50 while pos < len(data):
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 if end == -1:
52 if end == -1:
53 raise ValueError('Manifest ended with incomplete file entry.')
53 raise ValueError('Manifest ended with incomplete file entry.')
54 stemlen = ord(data[pos])
54 stemlen = ord(data[pos])
55 items = data[pos + 1:end].split('\0')
55 items = data[pos + 1:end].split('\0')
56 f = prevf[:stemlen] + items[0]
56 f = prevf[:stemlen] + items[0]
57 if prevf > f:
57 if prevf > f:
58 raise ValueError('Manifest entries not in sorted order.')
58 raise ValueError('Manifest entries not in sorted order.')
59 fl = items[1]
59 fl = items[1]
60 # Just ignore metadata (items[2:] for now)
60 # Just ignore metadata (items[2:] for now)
61 n = data[end + 1:end + 21]
61 n = data[end + 1:end + 21]
62 yield f, n, fl
62 yield f, n, fl
63 pos = end + 22
63 pos = end + 22
64 prevf = f
64 prevf = f
65
65
def _parse(data):
    """Generates (path, node, flags) tuples from a manifest text"""
    # A leading NUL byte marks the v2 format; anything else is v1.
    parser = _parsev2 if data.startswith('\0') else _parsev1
    return iter(parser(data))
72
72
def _text(it, usemanifestv2):
    """Given an iterator over (path, node, flags) tuples, returns a manifest
    text"""
    return _textv2(it) if usemanifestv2 else _textv1(it)
80
80
def _textv1(it):
    """Serialize (path, node, flags) tuples into v1 manifest text."""
    hexfn = revlog.hex
    paths = []
    lines = []
    for path, node, flags in it:
        paths.append(path)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (path, hexfn(node), flags))

    _checkforbidden(paths)
    return ''.join(lines)
93
93
def _textv2(it):
    """Serialize (path, node, flags) tuples into v2 manifest text."""
    paths = []
    # The leading NUL line is the (currently empty) metadata section.
    lines = ['\0\n']
    prevpath = ''
    for path, node, flags in it:
        paths.append(path)
        # Prefix-compress each path against the previous one; the stem
        # length must fit in a single byte, so cap it at 255.
        common = os.path.commonprefix([prevpath, path])
        stemlen = min(len(common), 255)
        lines.append("%c%s\0%s\n%s\n" % (stemlen, path[stemlen:], flags, node))
        prevpath = path
    _checkforbidden(paths)
    return ''.join(lines)
106
106
class _lazymanifest(dict):
    """This is the pure implementation of lazymanifest.

    It has not been optimized *at all* and is not lazy.

    Maps file path -> (node, flags); iteration order is always sorted by
    path, matching the behavior of the C implementation that normally
    replaces this class (see the try/except below the class).
    """

    def __init__(self, data):
        # Eagerly parse the serialized manifest text into dict entries.
        dict.__init__(self)
        for f, n, fl in _parse(data):
            self[f] = n, fl

    def __setitem__(self, k, v):
        # v is a (node, flags) pair; node must always be present.
        node, flag = v
        assert node is not None
        if len(node) > 21:
            node = node[:21] # match c implementation behavior
        dict.__setitem__(self, k, (node, flag))

    def __iter__(self):
        # Paths are always yielded in sorted order.
        return iter(sorted(dict.keys(self)))

    def iterkeys(self):
        return iter(sorted(dict.keys(self)))

    def iterentries(self):
        # Yield (path, node, flags) triples in sorted path order.
        return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))

    def copy(self):
        c = _lazymanifest('')
        c.update(self)
        return c

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Returns a dict of path -> ((node1, flags1), (node2, flags2));
        a missing side is represented as (None, ''). When *clean* is
        true, unchanged entries are included with a None value.
        '''
        diff = {}

        # Entries present here: changed or removed relative to m2.
        for fn, e1 in self.iteritems():
            if fn not in m2:
                diff[fn] = e1, (None, '')
            else:
                e2 = m2[fn]
                if e1 != e2:
                    diff[fn] = e1, e2
                elif clean:
                    diff[fn] = None

        # Entries only present in m2: added relative to self.
        for fn, e2 in m2.iteritems():
            if fn not in self:
                diff[fn] = (None, ''), e2

        return diff

    def filtercopy(self, filterfn):
        # Copy containing only the entries whose path passes filterfn.
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        return _textv1(self.iterentries())
169
169
# Prefer the C implementation of lazymanifest when the parsers module
# provides one; otherwise keep the pure-Python fallback defined above.
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
174
174
class manifestdict(object):
    """A manifest: a mapping of file path -> (nodeid, flags).

    Thin wrapper around a _lazymanifest (C or pure-Python). Plain
    indexing returns only the nodeid; flags are read via flags() and
    written via setflag(), and find() returns the (node, flags) pair.
    """
    def __init__(self, data=''):
        if data.startswith('\0'):
            #_lazymanifest can not parse v2
            self._lm = _lazymanifest('')
            for f, n, fl in _parsev2(data):
                self._lm[f] = n, fl
        else:
            self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        # Returns only the nodeid; use find() for (node, flags).
        return self._lm[key][0]

    def find(self, key):
        # Returns the (node, flags) pair; raises KeyError if absent.
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __setitem__(self, key, node):
        # Assigning a node preserves any existing flags (default '').
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        diff = self.diff(m2)
        # hashflags[1][0] is the node on the m2 side; None means the
        # file does not exist there.
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        # Multiset of all directories containing at least one file.
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # Anything left in fset was requested but never seen; unless it
        # is a directory prefix, report it as bad.
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        # Requires the key to already exist (self[key] raises otherwise).
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def iteritems(self):
        # Yield (path, node) pairs, dropping the flags column.
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self, usemanifestv2=False):
        if usemanifestv2:
            return _textv2(self._lm.iterentries())
        else:
            # use (probably) native version for v1
            return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                    l = ""
                # Merge adjacent/overlapping edits into a single delta
                # chunk; otherwise flush the current chunk and start anew.
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = array.array('c', self.text())
            deltatext = mdiff.textdiff(base, arraytext)

        return arraytext, deltatext
391
391
392 def _msearch(m, s, lo=0, hi=None):
392 def _msearch(m, s, lo=0, hi=None):
393 '''return a tuple (start, end) that says where to find s within m.
393 '''return a tuple (start, end) that says where to find s within m.
394
394
395 If the string is found m[start:end] are the line containing
395 If the string is found m[start:end] are the line containing
396 that string. If start == end the string was not found and
396 that string. If start == end the string was not found and
397 they indicate the proper sorted insertion point.
397 they indicate the proper sorted insertion point.
398
398
399 m should be a buffer or a string
399 m should be a buffer or a string
400 s is a string'''
400 s is a string'''
401 def advance(i, c):
401 def advance(i, c):
402 while i < lenm and m[i] != c:
402 while i < lenm and m[i] != c:
403 i += 1
403 i += 1
404 return i
404 return i
405 if not s:
405 if not s:
406 return (lo, lo)
406 return (lo, lo)
407 lenm = len(m)
407 lenm = len(m)
408 if not hi:
408 if not hi:
409 hi = lenm
409 hi = lenm
410 while lo < hi:
410 while lo < hi:
411 mid = (lo + hi) // 2
411 mid = (lo + hi) // 2
412 start = mid
412 start = mid
413 while start > 0 and m[start - 1] != '\n':
413 while start > 0 and m[start - 1] != '\n':
414 start -= 1
414 start -= 1
415 end = advance(start, '\0')
415 end = advance(start, '\0')
416 if m[start:end] < s:
416 if m[start:end] < s:
417 # we know that after the null there are 40 bytes of sha1
417 # we know that after the null there are 40 bytes of sha1
418 # this translates to the bisect lo = mid + 1
418 # this translates to the bisect lo = mid + 1
419 lo = advance(end + 40, '\n') + 1
419 lo = advance(end + 40, '\n') + 1
420 else:
420 else:
421 # this translates to the bisect hi = mid
421 # this translates to the bisect hi = mid
422 hi = start
422 hi = start
423 end = advance(lo, '\0')
423 end = advance(lo, '\0')
424 found = m[lo:end]
424 found = m[lo:end]
425 if s == found:
425 if s == found:
426 # we know that after the null there are 40 bytes of sha1
426 # we know that after the null there are 40 bytes of sha1
427 end = advance(end + 40, '\n')
427 end = advance(end + 40, '\n')
428 return (lo, end + 1)
428 return (lo, end + 1)
429 else:
429 else:
430 return (lo, lo)
430 return (lo, lo)
431
431
432 def _checkforbidden(l):
432 def _checkforbidden(l):
433 """Check filenames for illegal characters."""
433 """Check filenames for illegal characters."""
434 for f in l:
434 for f in l:
435 if '\n' in f or '\r' in f:
435 if '\n' in f or '\r' in f:
436 raise error.RevlogError(
436 raise error.RevlogError(
437 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
437 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
438
438
439
439
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    rebuilt = array.array('c')
    cursor = 0
    chunks = []
    for start, end, content in x:
        # copy the untouched span, then splice in the replacement text
        rebuilt += addlist[cursor:start]
        if content:
            rebuilt += array.array('c', content)
        cursor = end
        # each delta chunk is a (start, end, length) header plus content
        chunks.append(struct.pack(">lll", start, end, len(content)) + content)
    rebuilt += addlist[cursor:]

    return "".join(chunks), rebuilt
460
460
461 def _splittopdir(f):
461 def _splittopdir(f):
462 if '/' in f:
462 if '/' in f:
463 dir, subpath = f.split('/', 1)
463 dir, subpath = f.split('/', 1)
464 return dir + '/', subpath
464 return dir + '/', subpath
465 else:
465 else:
466 return '', f
466 return '', f
467
467
# Sentinel no-op callable used by treemanifest for _loadfunc/_copyfunc
# when nothing is pending; treemanifest._load compares against it with
# 'is not _noop' to detect a real pending load/copy.
_noop = lambda s: None
469
469
470 class treemanifest(object):
470 class treemanifest(object):
471 def __init__(self, dir='', text=''):
471 def __init__(self, dir='', text=''):
472 self._dir = dir
472 self._dir = dir
473 self._node = revlog.nullid
473 self._node = revlog.nullid
474 self._loadfunc = _noop
474 self._loadfunc = _noop
475 self._copyfunc = _noop
475 self._copyfunc = _noop
476 self._dirty = False
476 self._dirty = False
477 self._dirs = {}
477 self._dirs = {}
478 # Using _lazymanifest here is a little slower than plain old dicts
478 # Using _lazymanifest here is a little slower than plain old dicts
479 self._files = {}
479 self._files = {}
480 self._flags = {}
480 self._flags = {}
481 if text:
481 if text:
482 def readsubtree(subdir, subm):
482 def readsubtree(subdir, subm):
483 raise AssertionError('treemanifest constructor only accepts '
483 raise AssertionError('treemanifest constructor only accepts '
484 'flat manifests')
484 'flat manifests')
485 self.parse(text, readsubtree)
485 self.parse(text, readsubtree)
486 self._dirty = True # Mark flat manifest dirty after parsing
486 self._dirty = True # Mark flat manifest dirty after parsing
487
487
488 def _subpath(self, path):
488 def _subpath(self, path):
489 return self._dir + path
489 return self._dir + path
490
490
491 def __len__(self):
491 def __len__(self):
492 self._load()
492 self._load()
493 size = len(self._files)
493 size = len(self._files)
494 for m in self._dirs.values():
494 for m in self._dirs.values():
495 size += m.__len__()
495 size += m.__len__()
496 return size
496 return size
497
497
498 def _isempty(self):
498 def _isempty(self):
499 self._load() # for consistency; already loaded by all callers
499 self._load() # for consistency; already loaded by all callers
500 return (not self._files and (not self._dirs or
500 return (not self._files and (not self._dirs or
501 all(m._isempty() for m in self._dirs.values())))
501 all(m._isempty() for m in self._dirs.values())))
502
502
503 def __repr__(self):
503 def __repr__(self):
504 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
504 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
505 (self._dir, revlog.hex(self._node),
505 (self._dir, revlog.hex(self._node),
506 bool(self._loadfunc is _noop),
506 bool(self._loadfunc is _noop),
507 self._dirty, id(self)))
507 self._dirty, id(self)))
508
508
509 def dir(self):
509 def dir(self):
510 '''The directory that this tree manifest represents, including a
510 '''The directory that this tree manifest represents, including a
511 trailing '/'. Empty string for the repo root directory.'''
511 trailing '/'. Empty string for the repo root directory.'''
512 return self._dir
512 return self._dir
513
513
514 def node(self):
514 def node(self):
515 '''This node of this instance. nullid for unsaved instances. Should
515 '''This node of this instance. nullid for unsaved instances. Should
516 be updated when the instance is read or written from a revlog.
516 be updated when the instance is read or written from a revlog.
517 '''
517 '''
518 assert not self._dirty
518 assert not self._dirty
519 return self._node
519 return self._node
520
520
521 def setnode(self, node):
521 def setnode(self, node):
522 self._node = node
522 self._node = node
523 self._dirty = False
523 self._dirty = False
524
524
525 def iterentries(self):
525 def iterentries(self):
526 self._load()
526 self._load()
527 for p, n in sorted(self._dirs.items() + self._files.items()):
527 for p, n in sorted(self._dirs.items() + self._files.items()):
528 if p in self._files:
528 if p in self._files:
529 yield self._subpath(p), n, self._flags.get(p, '')
529 yield self._subpath(p), n, self._flags.get(p, '')
530 else:
530 else:
531 for x in n.iterentries():
531 for x in n.iterentries():
532 yield x
532 yield x
533
533
534 def iteritems(self):
534 def iteritems(self):
535 self._load()
535 self._load()
536 for p, n in sorted(self._dirs.items() + self._files.items()):
536 for p, n in sorted(self._dirs.items() + self._files.items()):
537 if p in self._files:
537 if p in self._files:
538 yield self._subpath(p), n
538 yield self._subpath(p), n
539 else:
539 else:
540 for f, sn in n.iteritems():
540 for f, sn in n.iteritems():
541 yield f, sn
541 yield f, sn
542
542
543 def iterkeys(self):
543 def iterkeys(self):
544 self._load()
544 self._load()
545 for p in sorted(self._dirs.keys() + self._files.keys()):
545 for p in sorted(self._dirs.keys() + self._files.keys()):
546 if p in self._files:
546 if p in self._files:
547 yield self._subpath(p)
547 yield self._subpath(p)
548 else:
548 else:
549 for f in self._dirs[p].iterkeys():
549 for f in self._dirs[p].iterkeys():
550 yield f
550 yield f
551
551
552 def keys(self):
552 def keys(self):
553 return list(self.iterkeys())
553 return list(self.iterkeys())
554
554
555 def __iter__(self):
555 def __iter__(self):
556 return self.iterkeys()
556 return self.iterkeys()
557
557
558 def __contains__(self, f):
558 def __contains__(self, f):
559 if f is None:
559 if f is None:
560 return False
560 return False
561 self._load()
561 self._load()
562 dir, subpath = _splittopdir(f)
562 dir, subpath = _splittopdir(f)
563 if dir:
563 if dir:
564 if dir not in self._dirs:
564 if dir not in self._dirs:
565 return False
565 return False
566 return self._dirs[dir].__contains__(subpath)
566 return self._dirs[dir].__contains__(subpath)
567 else:
567 else:
568 return f in self._files
568 return f in self._files
569
569
570 def get(self, f, default=None):
570 def get(self, f, default=None):
571 self._load()
571 self._load()
572 dir, subpath = _splittopdir(f)
572 dir, subpath = _splittopdir(f)
573 if dir:
573 if dir:
574 if dir not in self._dirs:
574 if dir not in self._dirs:
575 return default
575 return default
576 return self._dirs[dir].get(subpath, default)
576 return self._dirs[dir].get(subpath, default)
577 else:
577 else:
578 return self._files.get(f, default)
578 return self._files.get(f, default)
579
579
580 def __getitem__(self, f):
580 def __getitem__(self, f):
581 self._load()
581 self._load()
582 dir, subpath = _splittopdir(f)
582 dir, subpath = _splittopdir(f)
583 if dir:
583 if dir:
584 return self._dirs[dir].__getitem__(subpath)
584 return self._dirs[dir].__getitem__(subpath)
585 else:
585 else:
586 return self._files[f]
586 return self._files[f]
587
587
588 def flags(self, f):
588 def flags(self, f):
589 self._load()
589 self._load()
590 dir, subpath = _splittopdir(f)
590 dir, subpath = _splittopdir(f)
591 if dir:
591 if dir:
592 if dir not in self._dirs:
592 if dir not in self._dirs:
593 return ''
593 return ''
594 return self._dirs[dir].flags(subpath)
594 return self._dirs[dir].flags(subpath)
595 else:
595 else:
596 if f in self._dirs:
596 if f in self._dirs:
597 return ''
597 return ''
598 return self._flags.get(f, '')
598 return self._flags.get(f, '')
599
599
600 def find(self, f):
600 def find(self, f):
601 self._load()
601 self._load()
602 dir, subpath = _splittopdir(f)
602 dir, subpath = _splittopdir(f)
603 if dir:
603 if dir:
604 return self._dirs[dir].find(subpath)
604 return self._dirs[dir].find(subpath)
605 else:
605 else:
606 return self._files[f], self._flags.get(f, '')
606 return self._files[f], self._flags.get(f, '')
607
607
608 def __delitem__(self, f):
608 def __delitem__(self, f):
609 self._load()
609 self._load()
610 dir, subpath = _splittopdir(f)
610 dir, subpath = _splittopdir(f)
611 if dir:
611 if dir:
612 self._dirs[dir].__delitem__(subpath)
612 self._dirs[dir].__delitem__(subpath)
613 # If the directory is now empty, remove it
613 # If the directory is now empty, remove it
614 if self._dirs[dir]._isempty():
614 if self._dirs[dir]._isempty():
615 del self._dirs[dir]
615 del self._dirs[dir]
616 else:
616 else:
617 del self._files[f]
617 del self._files[f]
618 if f in self._flags:
618 if f in self._flags:
619 del self._flags[f]
619 del self._flags[f]
620 self._dirty = True
620 self._dirty = True
621
621
622 def __setitem__(self, f, n):
622 def __setitem__(self, f, n):
623 assert n is not None
623 assert n is not None
624 self._load()
624 self._load()
625 dir, subpath = _splittopdir(f)
625 dir, subpath = _splittopdir(f)
626 if dir:
626 if dir:
627 if dir not in self._dirs:
627 if dir not in self._dirs:
628 self._dirs[dir] = treemanifest(self._subpath(dir))
628 self._dirs[dir] = treemanifest(self._subpath(dir))
629 self._dirs[dir].__setitem__(subpath, n)
629 self._dirs[dir].__setitem__(subpath, n)
630 else:
630 else:
631 self._files[f] = n[:21] # to match manifestdict's behavior
631 self._files[f] = n[:21] # to match manifestdict's behavior
632 self._dirty = True
632 self._dirty = True
633
633
634 def _load(self):
634 def _load(self):
635 if self._loadfunc is not _noop:
635 if self._loadfunc is not _noop:
636 lf, self._loadfunc = self._loadfunc, _noop
636 lf, self._loadfunc = self._loadfunc, _noop
637 lf(self)
637 lf(self)
638 elif self._copyfunc is not _noop:
638 elif self._copyfunc is not _noop:
639 cf, self._copyfunc = self._copyfunc, _noop
639 cf, self._copyfunc = self._copyfunc, _noop
640 cf(self)
640 cf(self)
641
641
642 def setflag(self, f, flags):
642 def setflag(self, f, flags):
643 """Set the flags (symlink, executable) for path f."""
643 """Set the flags (symlink, executable) for path f."""
644 self._load()
644 self._load()
645 dir, subpath = _splittopdir(f)
645 dir, subpath = _splittopdir(f)
646 if dir:
646 if dir:
647 if dir not in self._dirs:
647 if dir not in self._dirs:
648 self._dirs[dir] = treemanifest(self._subpath(dir))
648 self._dirs[dir] = treemanifest(self._subpath(dir))
649 self._dirs[dir].setflag(subpath, flags)
649 self._dirs[dir].setflag(subpath, flags)
650 else:
650 else:
651 self._flags[f] = flags
651 self._flags[f] = flags
652 self._dirty = True
652 self._dirty = True
653
653
654 def copy(self):
654 def copy(self):
655 copy = treemanifest(self._dir)
655 copy = treemanifest(self._dir)
656 copy._node = self._node
656 copy._node = self._node
657 copy._dirty = self._dirty
657 copy._dirty = self._dirty
658 if self._copyfunc is _noop:
658 if self._copyfunc is _noop:
659 def _copyfunc(s):
659 def _copyfunc(s):
660 self._load()
660 self._load()
661 for d in self._dirs:
661 for d in self._dirs:
662 s._dirs[d] = self._dirs[d].copy()
662 s._dirs[d] = self._dirs[d].copy()
663 s._files = dict.copy(self._files)
663 s._files = dict.copy(self._files)
664 s._flags = dict.copy(self._flags)
664 s._flags = dict.copy(self._flags)
665 if self._loadfunc is _noop:
665 if self._loadfunc is _noop:
666 _copyfunc(copy)
666 _copyfunc(copy)
667 else:
667 else:
668 copy._copyfunc = _copyfunc
668 copy._copyfunc = _copyfunc
669 else:
669 else:
670 copy._copyfunc = self._copyfunc
670 copy._copyfunc = self._copyfunc
671 return copy
671 return copy
672
672
673 def filesnotin(self, m2):
673 def filesnotin(self, m2):
674 '''Set of files in this manifest that are not in the other'''
674 '''Set of files in this manifest that are not in the other'''
675 files = set()
675 files = set()
676 def _filesnotin(t1, t2):
676 def _filesnotin(t1, t2):
677 if t1._node == t2._node and not t1._dirty and not t2._dirty:
677 if t1._node == t2._node and not t1._dirty and not t2._dirty:
678 return
678 return
679 t1._load()
679 t1._load()
680 t2._load()
680 t2._load()
681 for d, m1 in t1._dirs.iteritems():
681 for d, m1 in t1._dirs.iteritems():
682 if d in t2._dirs:
682 if d in t2._dirs:
683 m2 = t2._dirs[d]
683 m2 = t2._dirs[d]
684 _filesnotin(m1, m2)
684 _filesnotin(m1, m2)
685 else:
685 else:
686 files.update(m1.iterkeys())
686 files.update(m1.iterkeys())
687
687
688 for fn in t1._files.iterkeys():
688 for fn in t1._files.iterkeys():
689 if fn not in t2._files:
689 if fn not in t2._files:
690 files.add(t1._subpath(fn))
690 files.add(t1._subpath(fn))
691
691
692 _filesnotin(self, m2)
692 _filesnotin(self, m2)
693 return files
693 return files
694
694
695 @propertycache
695 @propertycache
696 def _alldirs(self):
696 def _alldirs(self):
697 return util.dirs(self)
697 return util.dirs(self)
698
698
699 def dirs(self):
699 def dirs(self):
700 return self._alldirs
700 return self._alldirs
701
701
702 def hasdir(self, dir):
702 def hasdir(self, dir):
703 self._load()
703 self._load()
704 topdir, subdir = _splittopdir(dir)
704 topdir, subdir = _splittopdir(dir)
705 if topdir:
705 if topdir:
706 if topdir in self._dirs:
706 if topdir in self._dirs:
707 return self._dirs[topdir].hasdir(subdir)
707 return self._dirs[topdir].hasdir(subdir)
708 return False
708 return False
709 return (dir + '/') in self._dirs
709 return (dir + '/') in self._dirs
710
710
711 def walk(self, match):
711 def walk(self, match):
712 '''Generates matching file names.
712 '''Generates matching file names.
713
713
714 Equivalent to manifest.matches(match).iterkeys(), but without creating
714 Equivalent to manifest.matches(match).iterkeys(), but without creating
715 an entirely new manifest.
715 an entirely new manifest.
716
716
717 It also reports nonexistent files by marking them bad with match.bad().
717 It also reports nonexistent files by marking them bad with match.bad().
718 '''
718 '''
719 if match.always():
719 if match.always():
720 for f in iter(self):
720 for f in iter(self):
721 yield f
721 yield f
722 return
722 return
723
723
724 fset = set(match.files())
724 fset = set(match.files())
725
725
726 for fn in self._walk(match):
726 for fn in self._walk(match):
727 if fn in fset:
727 if fn in fset:
728 # specified pattern is the exact name
728 # specified pattern is the exact name
729 fset.remove(fn)
729 fset.remove(fn)
730 yield fn
730 yield fn
731
731
732 # for dirstate.walk, files=['.'] means "walk the whole tree".
732 # for dirstate.walk, files=['.'] means "walk the whole tree".
733 # follow that here, too
733 # follow that here, too
734 fset.discard('.')
734 fset.discard('.')
735
735
736 for fn in sorted(fset):
736 for fn in sorted(fset):
737 if not self.hasdir(fn):
737 if not self.hasdir(fn):
738 match.bad(fn, None)
738 match.bad(fn, None)
739
739
740 def _walk(self, match):
740 def _walk(self, match):
741 '''Recursively generates matching file names for walk().'''
741 '''Recursively generates matching file names for walk().'''
742 if not match.visitdir(self._dir[:-1] or '.'):
742 if not match.visitdir(self._dir[:-1] or '.'):
743 return
743 return
744
744
745 # yield this dir's files and walk its submanifests
745 # yield this dir's files and walk its submanifests
746 self._load()
746 self._load()
747 for p in sorted(self._dirs.keys() + self._files.keys()):
747 for p in sorted(self._dirs.keys() + self._files.keys()):
748 if p in self._files:
748 if p in self._files:
749 fullp = self._subpath(p)
749 fullp = self._subpath(p)
750 if match(fullp):
750 if match(fullp):
751 yield fullp
751 yield fullp
752 else:
752 else:
753 for f in self._dirs[p]._walk(match):
753 for f in self._dirs[p]._walk(match):
754 yield f
754 yield f
755
755
756 def matches(self, match):
756 def matches(self, match):
757 '''generate a new manifest filtered by the match argument'''
757 '''generate a new manifest filtered by the match argument'''
758 if match.always():
758 if match.always():
759 return self.copy()
759 return self.copy()
760
760
761 return self._matches(match)
761 return self._matches(match)
762
762
763 def _matches(self, match):
763 def _matches(self, match):
764 '''recursively generate a new manifest filtered by the match argument.
764 '''recursively generate a new manifest filtered by the match argument.
765 '''
765 '''
766
766
767 visit = match.visitdir(self._dir[:-1] or '.')
767 visit = match.visitdir(self._dir[:-1] or '.')
768 if visit == 'all':
768 if visit == 'all':
769 return self.copy()
769 return self.copy()
770 ret = treemanifest(self._dir)
770 ret = treemanifest(self._dir)
771 if not visit:
771 if not visit:
772 return ret
772 return ret
773
773
774 self._load()
774 self._load()
775 for fn in self._files:
775 for fn in self._files:
776 fullp = self._subpath(fn)
776 fullp = self._subpath(fn)
777 if not match(fullp):
777 if not match(fullp):
778 continue
778 continue
779 ret._files[fn] = self._files[fn]
779 ret._files[fn] = self._files[fn]
780 if fn in self._flags:
780 if fn in self._flags:
781 ret._flags[fn] = self._flags[fn]
781 ret._flags[fn] = self._flags[fn]
782
782
783 for dir, subm in self._dirs.iteritems():
783 for dir, subm in self._dirs.iteritems():
784 m = subm._matches(match)
784 m = subm._matches(match)
785 if not m._isempty():
785 if not m._isempty():
786 ret._dirs[dir] = m
786 ret._dirs[dir] = m
787
787
788 if not ret._isempty():
788 if not ret._isempty():
789 ret._dirty = True
789 ret._dirty = True
790 return ret
790 return ret
791
791
792 def diff(self, m2, clean=False):
792 def diff(self, m2, clean=False):
793 '''Finds changes between the current manifest and m2.
793 '''Finds changes between the current manifest and m2.
794
794
795 Args:
795 Args:
796 m2: the manifest to which this manifest should be compared.
796 m2: the manifest to which this manifest should be compared.
797 clean: if true, include files unchanged between these manifests
797 clean: if true, include files unchanged between these manifests
798 with a None value in the returned dictionary.
798 with a None value in the returned dictionary.
799
799
800 The result is returned as a dict with filename as key and
800 The result is returned as a dict with filename as key and
801 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
801 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
802 nodeid in the current/other manifest and fl1/fl2 is the flag
802 nodeid in the current/other manifest and fl1/fl2 is the flag
803 in the current/other manifest. Where the file does not exist,
803 in the current/other manifest. Where the file does not exist,
804 the nodeid will be None and the flags will be the empty
804 the nodeid will be None and the flags will be the empty
805 string.
805 string.
806 '''
806 '''
807 result = {}
807 result = {}
808 emptytree = treemanifest()
808 emptytree = treemanifest()
809 def _diff(t1, t2):
809 def _diff(t1, t2):
810 if t1._node == t2._node and not t1._dirty and not t2._dirty:
810 if t1._node == t2._node and not t1._dirty and not t2._dirty:
811 return
811 return
812 t1._load()
812 t1._load()
813 t2._load()
813 t2._load()
814 for d, m1 in t1._dirs.iteritems():
814 for d, m1 in t1._dirs.iteritems():
815 m2 = t2._dirs.get(d, emptytree)
815 m2 = t2._dirs.get(d, emptytree)
816 _diff(m1, m2)
816 _diff(m1, m2)
817
817
818 for d, m2 in t2._dirs.iteritems():
818 for d, m2 in t2._dirs.iteritems():
819 if d not in t1._dirs:
819 if d not in t1._dirs:
820 _diff(emptytree, m2)
820 _diff(emptytree, m2)
821
821
822 for fn, n1 in t1._files.iteritems():
822 for fn, n1 in t1._files.iteritems():
823 fl1 = t1._flags.get(fn, '')
823 fl1 = t1._flags.get(fn, '')
824 n2 = t2._files.get(fn, None)
824 n2 = t2._files.get(fn, None)
825 fl2 = t2._flags.get(fn, '')
825 fl2 = t2._flags.get(fn, '')
826 if n1 != n2 or fl1 != fl2:
826 if n1 != n2 or fl1 != fl2:
827 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
827 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
828 elif clean:
828 elif clean:
829 result[t1._subpath(fn)] = None
829 result[t1._subpath(fn)] = None
830
830
831 for fn, n2 in t2._files.iteritems():
831 for fn, n2 in t2._files.iteritems():
832 if fn not in t1._files:
832 if fn not in t1._files:
833 fl2 = t2._flags.get(fn, '')
833 fl2 = t2._flags.get(fn, '')
834 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
834 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
835
835
836 _diff(self, m2)
836 _diff(self, m2)
837 return result
837 return result
838
838
839 def unmodifiedsince(self, m2):
839 def unmodifiedsince(self, m2):
840 return not self._dirty and not m2._dirty and self._node == m2._node
840 return not self._dirty and not m2._dirty and self._node == m2._node
841
841
842 def parse(self, text, readsubtree):
842 def parse(self, text, readsubtree):
843 for f, n, fl in _parse(text):
843 for f, n, fl in _parse(text):
844 if fl == 't':
844 if fl == 't':
845 f = f + '/'
845 f = f + '/'
846 self._dirs[f] = readsubtree(self._subpath(f), n)
846 self._dirs[f] = readsubtree(self._subpath(f), n)
847 elif '/' in f:
847 elif '/' in f:
848 # This is a flat manifest, so use __setitem__ and setflag rather
848 # This is a flat manifest, so use __setitem__ and setflag rather
849 # than assigning directly to _files and _flags, so we can
849 # than assigning directly to _files and _flags, so we can
850 # assign a path in a subdirectory, and to mark dirty (compared
850 # assign a path in a subdirectory, and to mark dirty (compared
851 # to nullid).
851 # to nullid).
852 self[f] = n
852 self[f] = n
853 if fl:
853 if fl:
854 self.setflag(f, fl)
854 self.setflag(f, fl)
855 else:
855 else:
856 # Assigning to _files and _flags avoids marking as dirty,
856 # Assigning to _files and _flags avoids marking as dirty,
857 # and should be a little faster.
857 # and should be a little faster.
858 self._files[f] = n
858 self._files[f] = n
859 if fl:
859 if fl:
860 self._flags[f] = fl
860 self._flags[f] = fl
861
861
862 def text(self, usemanifestv2=False):
862 def text(self, usemanifestv2=False):
863 """Get the full data of this manifest as a bytestring."""
863 """Get the full data of this manifest as a bytestring."""
864 self._load()
864 self._load()
865 return _text(self.iterentries(), usemanifestv2)
865 return _text(self.iterentries(), usemanifestv2)
866
866
867 def dirtext(self, usemanifestv2=False):
867 def dirtext(self, usemanifestv2=False):
868 """Get the full data of this directory as a bytestring. Make sure that
868 """Get the full data of this directory as a bytestring. Make sure that
869 any submanifests have been written first, so their nodeids are correct.
869 any submanifests have been written first, so their nodeids are correct.
870 """
870 """
871 self._load()
871 self._load()
872 flags = self.flags
872 flags = self.flags
873 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
873 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
874 files = [(f, self._files[f], flags(f)) for f in self._files]
874 files = [(f, self._files[f], flags(f)) for f in self._files]
875 return _text(sorted(dirs + files), usemanifestv2)
875 return _text(sorted(dirs + files), usemanifestv2)
876
876
877 def read(self, gettext, readsubtree):
877 def read(self, gettext, readsubtree):
878 def _load_for_read(s):
878 def _load_for_read(s):
879 s.parse(gettext(), readsubtree)
879 s.parse(gettext(), readsubtree)
880 s._dirty = False
880 s._dirty = False
881 self._loadfunc = _load_for_read
881 self._loadfunc = _load_for_read
882
882
883 def writesubtrees(self, m1, m2, writesubtree):
883 def writesubtrees(self, m1, m2, writesubtree):
884 self._load() # for consistency; should never have any effect here
884 self._load() # for consistency; should never have any effect here
885 m1._load()
885 m1._load()
886 m2._load()
886 m2._load()
887 emptytree = treemanifest()
887 emptytree = treemanifest()
888 for d, subm in self._dirs.iteritems():
888 for d, subm in self._dirs.iteritems():
889 subp1 = m1._dirs.get(d, emptytree)._node
889 subp1 = m1._dirs.get(d, emptytree)._node
890 subp2 = m2._dirs.get(d, emptytree)._node
890 subp2 = m2._dirs.get(d, emptytree)._node
891 if subp1 == revlog.nullid:
891 if subp1 == revlog.nullid:
892 subp1, subp2 = subp2, subp1
892 subp1, subp2 = subp2, subp1
893 writesubtree(subm, subp1, subp2)
893 writesubtree(subm, subp1, subp2)
894
894
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, dir='', dirlogcache=None):
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            usemanifestv2 = opts.get('manifestv2', usemanifestv2)

        self._treeondisk = usetreemanifest
        self._usemanifestv2 = usemanifestv2

        self._fulltextcache = util.lrucachedict(cachesize)

        indexfile = "00manifest.i"
        if dir:
            # subdirectory revlogs only exist for tree manifests on disk
            assert self._treeondisk, 'opts is %r' % opts
            if not dir.endswith('/'):
                dir = dir + '/'
            indexfile = "meta/" + dir + "00manifest.i"
        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        super(manifestrevlog, self).__init__(opener, indexfile)

    @property
    def fulltextcache(self):
        # LRU cache of full manifest texts keyed by node
        return self._fulltextcache

    def clearcaches(self):
        """Drop all cached state, including subdirectory revlogs."""
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
        self._dirlogcache = {'': self}

    def dirlog(self, dir):
        """Return the (cached) manifest revlog for subdirectory *dir*."""
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifestrevlog(self.opener, dir,
                                                    self._dirlogcache)
        return self._dirlogcache[dir]

    def add(self, m, transaction, link, p1, p2, added, removed):
        """Store manifest *m* as a new revision and return its node.

        *added* and *removed* list the file names changed relative to p1;
        they allow a cheap delta when p1's full text is already cached.
        """
        if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
            and not self._usemanifestv2):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                m1 = self.read(p1)
                m2 = self.read(p2)
                n = self._addtree(m, transaction, link, m1, m2)
                arraytext = None
            else:
                text = m.text(self._usemanifestv2)
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = array.array('c', text)

        self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2):
        """Write tree manifest *m* (and its dirty subtrees) and return its
        node."""
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
            return m.node()
        def writesubtree(subm, subp1, subp2):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None)
        m.writesubtrees(m1, m2, writesubtree)
        text = m.dirtext(self._usemanifestv2)
        # Double-check whether contents are unchanged to one parent
        if text == m1.dirtext(self._usemanifestv2):
            n = m1.node()
        elif text == m2.dirtext(self._usemanifestv2):
            n = m2.node()
        else:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
1005
1005
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo):
        self._repo = repo

        usetreemanifest = False

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
        self._treeinmem = usetreemanifest

        # We'll separate this into it's own cache once oldmanifest is no longer
        # used
        self._mancache = repo.manifest._mancache

    @property
    def _revlog(self):
        # the underlying manifest revlog lives on the repo for now
        return self._repo.manifest

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a KeyError
        if not found.
        """
        if node in self._mancache:
            cachemf = self._mancache[node]
            # The old manifest may put non-ctx manifests in the cache, so skip
            # those since they don't implement the full api.
            if isinstance(cachemf, (manifestctx, treemanifestctx)):
                return cachemf

        if self._treeinmem:
            m = treemanifestctx(self._revlog, '', node)
        else:
            m = manifestctx(self._revlog, node)
        if node != revlog.nullid:
            self._mancache[node] = m
        return m

    def add(self, m, transaction, link, p1, p2, added, removed):
        """Store manifest *m*; delegates to the underlying revlog."""
        return self._revlog.add(m, transaction, link, p1, p2, added, removed)
1054
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, revlog, node):
        self._revlog = revlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def node(self):
        return self._node

    def read(self):
        """Return the manifestdict for this revision, loading (and caching
        the full text) on first use."""
        if not self._data:
            if self._node == revlog.nullid:
                self._data = manifestdict()
            else:
                text = self._revlog.revision(self._node)
                # populate the revlog's fulltext cache as a side effect
                arraytext = array.array('c', text)
                self._revlog._fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self):
        """Return either the full manifest or, when the stored delta is
        against a parent, just that delta — whichever is cheaper."""
        rl = self._revlog
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self):
        """Return a manifestdict holding only the entries that changed
        relative to this revision's delta parent."""
        revlog = self._revlog
        if revlog._usemanifestv2:
            # Need to perform a slow delta
            r0 = revlog.deltaparent(revlog.rev(self._node))
            m0 = manifestctx(revlog, revlog.node(r0)).read()
            m1 = self.read()
            md = manifestdict()
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

        r = revlog.rev(self._node)
        d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
        return manifestdict(d)
1109
1112
class treemanifestctx(object):
    """A single revision of one directory of a tree manifest.

    Wraps the directory's revlog and lazily parses the revision text into
    a treemanifest object on first read().
    """
    def __init__(self, revlog, dir, node):
        # Resolve the revlog for this directory up front; self._revlog is
        # always the dir-specific log afterwards.
        revlog = revlog.dirlog(dir)
        self._revlog = revlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so
        # that we can instantiate treemanifestctx objects for directories we
        # don't have on disk.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def read(self):
        """Parse and return this revision's treemanifest (cached on self)."""
        if not self._data:
            if self._node == revlog.nullid:
                self._data = treemanifest()
            elif self._revlog._treeondisk:
                # Tree-on-disk storage: parse this directory's text and
                # recurse into subdirectory revlogs on demand.
                m = treemanifest(dir=self._dir)
                def gettext():
                    return self._revlog.revision(self._node)
                def readsubtree(dir, subm):
                    return treemanifestctx(self._revlog, dir, subm).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                # Flat storage: the whole tree is encoded in a single text.
                text = self._revlog.revision(self._node)
                arraytext = array.array('c', text)
                self._revlog.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def readdelta(self):
        """Return a treemanifest holding the entries that changed relative
        to this revision's delta parent.

        Always takes the slow path (read both manifests, diff them): a tree
        manifest delta cannot simply be patched into a text.
        """
        # Need to perform a slow delta
        revlog = self._revlog
        r0 = revlog.deltaparent(revlog.rev(self._node))
        # Bug fix: the constructor signature is (revlog, dir, node).  The
        # previous call `treemanifestctx(revlog, revlog.node(r0),
        # dir=self._dir)` bound the node positionally to 'dir' and passed
        # 'dir' again as a keyword, raising TypeError on every invocation.
        m0 = treemanifestctx(revlog, self._dir, revlog.node(r0)).read()
        m1 = self.read()
        md = treemanifest(dir=self._dir)
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readfast(self):
        """Return a manifest sufficient to enumerate this revision's
        changes: the delta when the delta base is one of this revision's
        parents, otherwise the full manifest."""
        rl = self._revlog
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()
1171
1174
class manifest(manifestrevlog):
    """Legacy combined manifest class: a manifestrevlog plus read()-style
    accessors that return parsed manifests (manifestdict or treemanifest)
    backed by an LRU cache of recently parsed revisions.
    """
    def __init__(self, opener, dir='', dirlogcache=None):
        '''The 'dir' and 'dirlogcache' arguments are for internal use by
        manifest.manifest only. External users should create a root manifest
        log with manifest.manifest(opener) and call dirlog() on it.
        '''
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
        # node -> parsed manifest (or a *manifestctx that can produce one;
        # see read() below).
        self._mancache = util.lrucachedict(cachesize)
        # When True, parsed manifests are treemanifest objects regardless of
        # the on-disk encoding.
        self._treeinmem = usetreemanifest
        super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)

    def _newmanifest(self, data=''):
        # Build a manifest of the in-memory flavor this repo is configured
        # for (tree vs. flat), optionally parsed from 'data'.
        if self._treeinmem:
            return treemanifest(self._dir, data)
        return manifestdict(data)

    def dirlog(self, dir):
        """This overrides the base revlog implementation to allow construction
        'manifest' types instead of manifestrevlog types. This is only needed
        until we migrate off the 'manifest' type."""
        if dir:
            # Subdirectory revlogs only exist when trees are stored on disk.
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifest(self.opener, dir,
                                              self._dirlogcache)
        return self._dirlogcache[dir]

    def _slowreaddelta(self, node):
        # Compute a delta by reading both full manifests and diffing them.
        # Used when the stored delta can't be patched into a text directly
        # (manifest v2 or tree-on-disk storage).
        r0 = self.deltaparent(self.rev(node))
        m0 = self.read(self.node(r0))
        m1 = self.read(node)
        md = self._newmanifest()
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readdelta(self, node):
        # Return a manifest containing just the entries that differ from
        # this revision's delta parent.
        if self._usemanifestv2 or self._treeondisk:
            return self._slowreaddelta(node)
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return self._newmanifest(d)

    def readshallowdelta(self, node):
        '''For flat manifests, this is the same as readdelta(). For
        treemanifests, this will read the delta for this revlog's directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifests, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.'''
        if not self._treeondisk:
            return self.readdelta(node)
        if self._usemanifestv2:
            raise error.Abort(
                _("readshallowdelta() not implemented for manifestv2"))
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return manifestdict(d)

    def readshallowfast(self, node):
        '''like readfast(), but calls readshallowdelta() instead of readdelta()
        '''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        # Only take the delta shortcut when the delta base is an actual
        # parent; otherwise the delta doesn't describe a parent->child change.
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readshallowdelta(node)
        return self.readshallow(node)

    def read(self, node):
        # Return the fully parsed manifest for 'node', consulting and
        # priming both the parsed-manifest LRU cache and the fulltext cache.
        if node == revlog.nullid:
            return self._newmanifest() # don't upset local cache
        if node in self._mancache:
            cached = self._mancache[node]
            if (isinstance(cached, manifestctx) or
                isinstance(cached, treemanifestctx)):
                # The cache may hold a lazy ctx placeholder; materialize it.
                cached = cached.read()
            return cached
        if self._treeondisk:
            def gettext():
                return self.revision(node)
            def readsubtree(dir, subm):
                return self.dirlog(dir).read(subm)
            m = self._newmanifest()
            m.read(gettext, readsubtree)
            m.setnode(node)
            # No single fulltext exists for a tree spread across revlogs.
            arraytext = None
        else:
            text = self.revision(node)
            m = self._newmanifest(text)
            arraytext = array.array('c', text)
        self._mancache[node] = m
        self.fulltextcache[node] = arraytext
        return m

    def readshallow(self, node):
        '''Reads the manifest in this directory. When using flat manifests,
        this manifest will generally have files in subdirectories in it. Does
        not cache the manifest as the callers generally do not read the same
        version twice.'''
        return manifestdict(self.revision(node))

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        m = self.read(node)
        try:
            return m.find(f)
        except KeyError:
            return None, None

    def clearcaches(self):
        # Drop the parsed-manifest cache in addition to the base revlog's
        # caches.
        super(manifest, self).clearcaches()
        self._mancache.clear()
General Comments 0
You need to be logged in to leave comments. Login now