manifestv2: set requires at repo creation time...
Martin von Zweigbergk
r24571:919f8ce0 default
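This changeset moves the manifestv2 decision from repository open time to repository creation time: when experimental.manifestv2 is set, hg init records a 'manifestv2' entry in .hg/requires. The format is added to localrepository.supportedformats (so clients that know the format may open such repos) and to openerreqs (so the store learns about it from the requirements), and the per-open config lookup in _applyrequirements is removed. A new test covers creation, hg verify, and the manifest revlog.

As a minimal sketch (not part of the changeset; the helper name and sample path are invented for illustration), the requires file can be inspected with nothing more than the one-entry-per-line format that _writerequirements below relies on:

    import os

    def has_requirement(repopath, name):
        # .hg/requires holds one requirement token per line.
        path = os.path.join(repopath, '.hg', 'requires')
        try:
            with open(path) as f:
                return name in (line.strip() for line in f)
        except IOError:
            return False  # no requires file at all

    # After `hg --config experimental.manifestv2=True init repo`:
    print(has_requirement('repo', 'manifestv2'))  # True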
diff --git a/tests/test-manifestv2.t b/tests/test-manifestv2.t
new file mode 100644
--- /dev/null
+++ b/tests/test-manifestv2.t
@@ -0,0 +1,31 @@
+Check that entry is added to .hg/requires
+
+  $ hg --config experimental.manifestv2=True init repo
+  $ cd repo
+  $ grep manifestv2 .hg/requires
+  manifestv2
+
+Set up simple repo
+
+  $ echo a > file1
+  $ echo b > file2
+  $ echo c > file3
+  $ hg ci -Aqm 'initial'
+  $ echo d > file2
+  $ hg ci -m 'modify file2'
+
+Check that 'hg verify', which uses manifest.readdelta(), works
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  3 files, 2 changesets, 4 total revisions
+
+TODO: Check that manifest revlog is smaller than for v1
+
+  $ hg debugindex -m
+     rev    offset  length   base linkrev nodeid       p1           p2
+       0         0     106      0       0 f6279f9f8b31 000000000000 000000000000
+       1       106      59      0       1 cd20459b75e6 f6279f9f8b31 000000000000
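The localrepo.py hunk that follows wires the new requirement into supportedformats, openerreqs, and repository creation. A requires entry is the right lever because, on open, the else branch of __init__ calls scmutil.readrequires(self.vfs, self.supported): any entry the client does not support makes the repository unopenable, so an old client fails cleanly instead of misreading the new manifest encoding. A rough, self-contained sketch of that gate (the function name matches upstream, but the body and error wording here are simplified for illustration; upstream raises error.RequirementError):

    def readrequires(lines, supported):
        # One requirement token per line; unknown entries block the open.
        requirements = set(l.strip() for l in lines if l.strip())
        missing = requirements - supported
        if missing:
            raise IOError('repository requires features unknown to this '
                          'client: %s' % ', '.join(sorted(missing)))
        return requirements

    # A client whose supported set lacks 'manifestv2' cannot open the repo:
    readrequires(['revlogv1', 'store', 'manifestv2'],
                 {'revlogv1', 'store', 'fncache', 'dotencode'})  # raises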
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1926 +1,1925 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, short
 from i18n import _
 import urllib
 import peer, changegroup, subrepo, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
 import transaction, store, encoding, exchange, bundle2
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 import branchmap, pathutil
 import namespaces
 propertycache = util.propertycache
 filecache = scmutil.filecache

 class repofilecache(filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """

     def __get__(self, repo, type=None):
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())

 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 class unfilteredpropertycache(propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                   'unbundle'))
 legacycaps = moderncaps.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats

     def close(self):
         self._repo.close()

     def _capabilities(self):
         return self._caps

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def url(self):
         return self._repo.url()

     def lookup(self, key):
         return self._repo.lookup(key)

     def branchmap(self):
         return self._repo.branchmap()

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   format='HG10', **kwargs):
         cg = exchange.getbundle(self._repo, source, heads=heads,
                                 common=common, bundlecaps=bundlecaps, **kwargs)
         if bundlecaps is not None and 'HG2Y' in bundlecaps:
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             cg = bundle2.unbundle20(self.ui, cg)
         return cg

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

     def unbundle(self, cg, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             cg = exchange.readbundle(self.ui, cg, None)
             ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
             if util.safehasattr(ret, 'getchunks'):
                 # This is a bundle20 object, turn it into an unbundler.
                 # This little dance should be dropped eventually when the API
                 # is finally improved.
                 stream = util.chunkbuffer(ret.getchunks())
                 ret = bundle2.unbundle20(self.ui, stream)
             return ret
         except error.PushRaced, exc:
             raise error.ResponseError(_('push failed:'), str(exc))

     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return changegroup.addchangegroup(self._repo, cg, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=legacycaps)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return changegroup.changegroup(self._repo, basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return changegroup.changegroupsubset(self._repo, bases, heads, source)

 class localrepository(object):

-    supportedformats = set(('revlogv1', 'generaldelta'))
+    supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                              'dotencode'))
-    openerreqs = set(('revlogv1', 'generaldelta'))
+    openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
     requirements = ['revlogv1']
     filtername = None

     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()

     def _baserequirements(self, create):
         return self.requirements[:]

     def __init__(self, baseui, path=None, create=False):
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported

         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
+                if self.ui.configbool('experimental', 'manifestv2', False):
+                    requirements.append("manifestv2")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.vfs, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()

         self.sharedpath = self.path
         try:
             vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                               realpath=True)
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sopener = self.svfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()


         self._branchcaches = {}
         self._revbranchcache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}

         # generic mapping between names and nodes
         self.names = namespaces.namespaces()

     def close(self):
         self._writecaches()

     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()

     def _restrictcapabilities(self, caps):
         # bundle2 is not ready for prime time, drop it unless explicitly
         # required by the tests (or some brave tester)
         if self.ui.configbool('experimental', 'bundle2-exp', False):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
             caps.add('bundle2-exp=' + urllib.quote(capsblob))
         return caps

     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.svfs.options = dict((r, 1) for r in requirements
                                  if r in self.openerreqs)
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
         usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
         if usetreemanifest is not None:
             self.svfs.options['usetreemanifest'] = usetreemanifest
-        usemanifestv2 = self.ui.configbool('experimental', 'manifestv2')
-        if usemanifestv2 is not None:
-            self.svfs.options['usemanifestv2'] = usemanifestv2

     def _writerequirements(self):
         reqfile = self.vfs("requires", "w")
         for r in sorted(self.requirements):
             reqfile.write("%s\n" % r)
         reqfile.close()

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False

     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle

     def unfiltered(self):
         """Return unfiltered version of the repository

         Intended to be overwritten by filtered repo."""
         return self

     def filtered(self, name):
         """Return a filtered version of a repository"""
         # build a new class with the mixin and the current class
         # (possibly subclass of the repo)
         class proxycls(repoview.repoview, self.unfiltered().__class__):
             pass
         return proxycls(self, name)

     @repofilecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.bmstore(self)

     @repofilecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)

     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads

     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)

     @storecache('obsstore')
     def obsstore(self):
         # read default format for new obsstore.
         defaultformat = self.ui.configint('format', 'obsstore-version', None)
         # rely on obsstore class default when possible.
         kwargs = {}
         if defaultformat is not None:
             kwargs['defaultformat'] = defaultformat
         readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
         store = obsolete.obsstore(self.svfs, readonly=readonly,
                                   **kwargs)
         if store and readonly:
             self.ui.warn(
                 _('obsolete feature not enabled but %i markers found!\n')
                 % len(list(store)))
         return store

     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.svfs)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c

     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.svfs)

     @repofilecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid

         return dirstate.dirstate(self.vfs, self.ui, self.root, validate)

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         if isinstance(changeid, slice):
             return [context.changectx(self, i)
                     for i in xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         return context.changectx(self, changeid)

     def __contains__(self, changeid):
         try:
             self[changeid]
             return True
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         return iter(self.changelog)

     def revs(self, expr, *args):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self)

     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''
         for r in self.revs(expr, *args):
             yield self[r]

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         """Call a hook, passing this repo instance.

         This a convenience method to aid invoking hooks. Extensions likely
         won't call this unless they have registered a custom hook or are
         replacing code that is expected to call a hook.
         """
         return hook.hook(self.ui, self, name, throw, **args)

     @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra={},
              editor=False):
         if isinstance(names, str):
             names = (names,)

         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 if munge:
                     m = munge(name)
                 else:
                     m = name

                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.vfs('localtags', 'r+')
             except IOError:
                 fp = self.vfs('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         fp.close()

         self.invalidatecaches()

         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])

         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m,
                               editor=editor)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date, editor=False):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         if not local:
             m = matchmod.exact(self.root, '', ['.hgtags'])
             if util.any(self.status(match=m, unknown=True, ignored=True)):
                 raise util.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date, editor=editor)

     @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''

         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None

                 self.nodetagscache = self.tagslist = None

         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()

         return cache

     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         if self.changelog.filteredrevs:
             tags, tt = self._findtags()
         else:
             tags = self._tagscache.tags
         for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t

     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''

         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?

         alltags = {} # map tag name to (node, hist)
         tagtypes = {}

         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         return self._tagscache.tagtypes.get(tagname)

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 l.append((self.changelog.rev(n), t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

         return self._tagscache.tagslist

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])

     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)

     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]} with branchheads
         ordered by increasing revision number'''
         branchmap.updatecache(self)
         return self._branchcaches[self.filtername]

     @unfilteredmethod
     def revbranchcache(self):
         if not self._revbranchcache:
             self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
         return self._revbranchcache

     def branchtip(self, branch, ignoremissing=False):
         '''return the tip node for a given branch

         If ignoremissing is True, then this method will not raise an error.
         This is helpful for callers that only expect None for a missing branch
         (e.g. namespace).

         '''
         try:
             return self.branchmap().branchtip(branch)
         except KeyError:
             if not ignoremissing:
                 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
             else:
                 pass

     def lookup(self, key):
         return self[key].node()

     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key

         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()

     def known(self, nodes):
         nm = self.changelog.nodemap
         pc = self._phasecache
         result = []
         for n in nodes:
             r = nm.get(n)
             resp = not (r is None or pc.phase(self, r) >= phases.secret)
             result.append(resp)
         return result

     def local(self):
         return self

     def cancopy(self):
         # so statichttprepo's override of local() works
         if not self.local():
             return False
         if not self.ui.configbool('phases', 'publish', True):
             return True
         # if publishing we can't copy if there is filtered content
         return not self.filtered('visible').changelog.filteredrevs

     def shared(self):
         '''the type of shared repository (None if not shared)'''
         if self.sharedpath != self.path:
             return 'store'
         return None

     def join(self, f, *insidef):
         return self.vfs.join(os.path.join(f, *insidef))

     def wjoin(self, f, *insidef):
         return self.vfs.reljoin(self.root, f, *insidef)

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.svfs, f)

     def changectx(self, changeid):
         return self[changeid]

     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()

     def setparents(self, p1, p2=nullid):
         self.dirstate.beginparentchange()
         copies = self.dirstate.setparents(p1, p2)
         pctx = self[p1]
         if copies:
             # Adjust copy records, the dirstate cannot do it, it
             # requires access to parents manifests. Preserve them
             # only for entries added to first parent.
             for f in copies:
                 if f not in pctx and copies[f] in pctx:
                     self.dirstate.copy(copies[f], f)
             if p2 == nullid:
                 for f, s in sorted(self.dirstate.copies().items()):
                     if f not in pctx and s not in pctx:
                         self.dirstate.copy(None, f)
         self.dirstate.endparentchange()

     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)

     def wfile(self, f, mode='r'):
         return self.wvfs(f, mode)

     def _link(self, f):
         return self.wvfs.islink(f)

     def _loadfilter(self, filter):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = matchmod.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
         return self.filterpats[filter]

     def _filter(self, filterpats, filename, data):
         for mf, fn, cmd in filterpats:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break

         return data

     @unfilteredpropertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')

     @unfilteredpropertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')

     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter

     def wread(self, filename):
         if self._link(filename):
             data = self.wvfs.readlink(filename)
         else:
             data = self.wvfs.read(filename)
         return self._filter(self._encodefilterpats, filename, data)

     def wwrite(self, filename, data, flags):
         data = self._filter(self._decodefilterpats, filename, data)
         if 'l' in flags:
             self.wvfs.symlink(data, filename)
         else:
             self.wvfs.write(filename, data)
             if 'x' in flags:
                 self.wvfs.setflags(filename, False, True)

     def wwritedata(self, filename, data):
         return self._filter(self._decodefilterpats, filename, data)

     def currenttransaction(self):
         """return the current transaction or None if non exists"""
         if self._transref:
             tr = self._transref()
         else:
             tr = None

         if tr and tr.running():
             return tr
         return None

     def transaction(self, desc, report=None):
         if (self.ui.configbool('devel', 'all')
                 or self.ui.configbool('devel', 'check-locks')):
             l = self._lockref and self._lockref()
             if l is None or not l.held:
                 msg = 'transaction with no lock\n'
                 if self.ui.tracebackflag:
                     util.debugstacktrace(msg, 1)
                 else:
                     self.ui.write_err(msg)
         tr = self.currenttransaction()
         if tr is not None:
             return tr.nest()

         # abort here if the journal already exists
440 c = changelog.changelog(self.svfs)
442 if 'HG_PENDING' in os.environ:
441 if 'HG_PENDING' in os.environ:
443 p = os.environ['HG_PENDING']
442 p = os.environ['HG_PENDING']
444 if p.startswith(self.root):
443 if p.startswith(self.root):
445 c.readpending('00changelog.i.a')
444 c.readpending('00changelog.i.a')
446 return c
445 return c
447
446
448 @storecache('00manifest.i')
447 @storecache('00manifest.i')
449 def manifest(self):
448 def manifest(self):
450 return manifest.manifest(self.svfs)
449 return manifest.manifest(self.svfs)
451
450
452 @repofilecache('dirstate')
451 @repofilecache('dirstate')
453 def dirstate(self):
452 def dirstate(self):
454 warned = [0]
453 warned = [0]
455 def validate(node):
454 def validate(node):
456 try:
455 try:
457 self.changelog.rev(node)
456 self.changelog.rev(node)
458 return node
457 return node
459 except error.LookupError:
458 except error.LookupError:
460 if not warned[0]:
459 if not warned[0]:
461 warned[0] = True
460 warned[0] = True
462 self.ui.warn(_("warning: ignoring unknown"
461 self.ui.warn(_("warning: ignoring unknown"
463 " working parent %s!\n") % short(node))
462 " working parent %s!\n") % short(node))
464 return nullid
463 return nullid
465
464
466 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
465 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
467
466
468 def __getitem__(self, changeid):
467 def __getitem__(self, changeid):
469 if changeid is None:
468 if changeid is None:
470 return context.workingctx(self)
469 return context.workingctx(self)
471 if isinstance(changeid, slice):
470 if isinstance(changeid, slice):
472 return [context.changectx(self, i)
471 return [context.changectx(self, i)
473 for i in xrange(*changeid.indices(len(self)))
472 for i in xrange(*changeid.indices(len(self)))
474 if i not in self.changelog.filteredrevs]
473 if i not in self.changelog.filteredrevs]
475 return context.changectx(self, changeid)
474 return context.changectx(self, changeid)
476
475
477 def __contains__(self, changeid):
476 def __contains__(self, changeid):
478 try:
477 try:
479 self[changeid]
478 self[changeid]
480 return True
479 return True
481 except error.RepoLookupError:
480 except error.RepoLookupError:
482 return False
481 return False
483
482
484 def __nonzero__(self):
483 def __nonzero__(self):
485 return True
484 return True
486
485
487 def __len__(self):
486 def __len__(self):
488 return len(self.changelog)
487 return len(self.changelog)
489
488
490 def __iter__(self):
489 def __iter__(self):
491 return iter(self.changelog)
490 return iter(self.changelog)
492
491
493 def revs(self, expr, *args):
492 def revs(self, expr, *args):
494 '''Return a list of revisions matching the given revset'''
493 '''Return a list of revisions matching the given revset'''
495 expr = revset.formatspec(expr, *args)
494 expr = revset.formatspec(expr, *args)
496 m = revset.match(None, expr)
495 m = revset.match(None, expr)
497 return m(self)
496 return m(self)
498
497
499 def set(self, expr, *args):
498 def set(self, expr, *args):
500 '''
499 '''
501 Yield a context for each matching revision, after doing arg
500 Yield a context for each matching revision, after doing arg
502 replacement via revset.formatspec
501 replacement via revset.formatspec
503 '''
502 '''
504 for r in self.revs(expr, *args):
503 for r in self.revs(expr, *args):
505 yield self[r]
504 yield self[r]
506
505
507 def url(self):
506 def url(self):
508 return 'file:' + self.root
507 return 'file:' + self.root
509
508
510 def hook(self, name, throw=False, **args):
509 def hook(self, name, throw=False, **args):
511 """Call a hook, passing this repo instance.
510 """Call a hook, passing this repo instance.
512
511
513 This a convenience method to aid invoking hooks. Extensions likely
512 This a convenience method to aid invoking hooks. Extensions likely
514 won't call this unless they have registered a custom hook or are
513 won't call this unless they have registered a custom hook or are
515 replacing code that is expected to call a hook.
514 replacing code that is expected to call a hook.
516 """
515 """
517 return hook.hook(self.ui, self, name, throw, **args)
516 return hook.hook(self.ui, self, name, throw, **args)
518
517
519 @unfilteredmethod
518 @unfilteredmethod
520 def _tag(self, names, node, message, local, user, date, extra={},
519 def _tag(self, names, node, message, local, user, date, extra={},
521 editor=False):
520 editor=False):
522 if isinstance(names, str):
521 if isinstance(names, str):
523 names = (names,)
522 names = (names,)
524
523
525 branches = self.branchmap()
524 branches = self.branchmap()
526 for name in names:
525 for name in names:
527 self.hook('pretag', throw=True, node=hex(node), tag=name,
526 self.hook('pretag', throw=True, node=hex(node), tag=name,
528 local=local)
527 local=local)
529 if name in branches:
528 if name in branches:
530 self.ui.warn(_("warning: tag %s conflicts with existing"
529 self.ui.warn(_("warning: tag %s conflicts with existing"
531 " branch name\n") % name)
530 " branch name\n") % name)
532
531
533 def writetags(fp, names, munge, prevtags):
532 def writetags(fp, names, munge, prevtags):
534 fp.seek(0, 2)
533 fp.seek(0, 2)
535 if prevtags and prevtags[-1] != '\n':
534 if prevtags and prevtags[-1] != '\n':
536 fp.write('\n')
535 fp.write('\n')
537 for name in names:
536 for name in names:
538 if munge:
537 if munge:
539 m = munge(name)
538 m = munge(name)
540 else:
539 else:
541 m = name
540 m = name
542
541
543 if (self._tagscache.tagtypes and
542 if (self._tagscache.tagtypes and
544 name in self._tagscache.tagtypes):
543 name in self._tagscache.tagtypes):
545 old = self.tags().get(name, nullid)
544 old = self.tags().get(name, nullid)
546 fp.write('%s %s\n' % (hex(old), m))
545 fp.write('%s %s\n' % (hex(old), m))
547 fp.write('%s %s\n' % (hex(node), m))
546 fp.write('%s %s\n' % (hex(node), m))
548 fp.close()
547 fp.close()
549
548
550 prevtags = ''
549 prevtags = ''
551 if local:
550 if local:
552 try:
551 try:
553 fp = self.vfs('localtags', 'r+')
552 fp = self.vfs('localtags', 'r+')
554 except IOError:
553 except IOError:
555 fp = self.vfs('localtags', 'a')
554 fp = self.vfs('localtags', 'a')
556 else:
555 else:
557 prevtags = fp.read()
556 prevtags = fp.read()
558
557
559 # local tags are stored in the current charset
558 # local tags are stored in the current charset
560 writetags(fp, names, None, prevtags)
559 writetags(fp, names, None, prevtags)
561 for name in names:
560 for name in names:
562 self.hook('tag', node=hex(node), tag=name, local=local)
561 self.hook('tag', node=hex(node), tag=name, local=local)
563 return
562 return
564
563
565 try:
564 try:
566 fp = self.wfile('.hgtags', 'rb+')
565 fp = self.wfile('.hgtags', 'rb+')
567 except IOError, e:
566 except IOError, e:
568 if e.errno != errno.ENOENT:
567 if e.errno != errno.ENOENT:
569 raise
568 raise
570 fp = self.wfile('.hgtags', 'ab')
569 fp = self.wfile('.hgtags', 'ab')
571 else:
570 else:
572 prevtags = fp.read()
571 prevtags = fp.read()
573
572
574 # committed tags are stored in UTF-8
573 # committed tags are stored in UTF-8
575 writetags(fp, names, encoding.fromlocal, prevtags)
574 writetags(fp, names, encoding.fromlocal, prevtags)
576
575
577 fp.close()
576 fp.close()
578
577
579 self.invalidatecaches()
578 self.invalidatecaches()
580
579
581 if '.hgtags' not in self.dirstate:
580 if '.hgtags' not in self.dirstate:
582 self[None].add(['.hgtags'])
581 self[None].add(['.hgtags'])
583
582
584 m = matchmod.exact(self.root, '', ['.hgtags'])
583 m = matchmod.exact(self.root, '', ['.hgtags'])
585 tagnode = self.commit(message, user, date, extra=extra, match=m,
584 tagnode = self.commit(message, user, date, extra=extra, match=m,
586 editor=editor)
585 editor=editor)
587
586
588 for name in names:
587 for name in names:
589 self.hook('tag', node=hex(node), tag=name, local=local)
588 self.hook('tag', node=hex(node), tag=name, local=local)
590
589
591 return tagnode
590 return tagnode
592
591
593 def tag(self, names, node, message, local, user, date, editor=False):
592 def tag(self, names, node, message, local, user, date, editor=False):
594 '''tag a revision with one or more symbolic names.
593 '''tag a revision with one or more symbolic names.
595
594
596 names is a list of strings or, when adding a single tag, names may be a
595 names is a list of strings or, when adding a single tag, names may be a
597 string.
596 string.
598
597
599 if local is True, the tags are stored in a per-repository file.
598 if local is True, the tags are stored in a per-repository file.
600 otherwise, they are stored in the .hgtags file, and a new
599 otherwise, they are stored in the .hgtags file, and a new
601 changeset is committed with the change.
600 changeset is committed with the change.
602
601
603 keyword arguments:
602 keyword arguments:
604
603
605 local: whether to store tags in non-version-controlled file
604 local: whether to store tags in non-version-controlled file
606 (default False)
605 (default False)
607
606
608 message: commit message to use if committing
607 message: commit message to use if committing
609
608
610 user: name of user to use if committing
609 user: name of user to use if committing
611
610
612 date: date tuple to use if committing'''
611 date: date tuple to use if committing'''
613
612
614 if not local:
613 if not local:
615 m = matchmod.exact(self.root, '', ['.hgtags'])
614 m = matchmod.exact(self.root, '', ['.hgtags'])
616 if util.any(self.status(match=m, unknown=True, ignored=True)):
615 if util.any(self.status(match=m, unknown=True, ignored=True)):
617 raise util.Abort(_('working copy of .hgtags is changed'),
616 raise util.Abort(_('working copy of .hgtags is changed'),
618 hint=_('please commit .hgtags manually'))
617 hint=_('please commit .hgtags manually'))
619
618
620 self.tags() # instantiate the cache
619 self.tags() # instantiate the cache
621 self._tag(names, node, message, local, user, date, editor=editor)
620 self._tag(names, node, message, local, user, date, editor=editor)
622
621
623 @filteredpropertycache
622 @filteredpropertycache
624 def _tagscache(self):
623 def _tagscache(self):
625 '''Returns a tagscache object that contains various tags related
624 '''Returns a tagscache object that contains various tags related
626 caches.'''
625 caches.'''
627
626
628 # This simplifies its cache management by having one decorated
627 # This simplifies its cache management by having one decorated
629 # function (this one) and the rest simply fetch things from it.
628 # function (this one) and the rest simply fetch things from it.
630 class tagscache(object):
629 class tagscache(object):
631 def __init__(self):
630 def __init__(self):
632 # These two define the set of tags for this repository. tags
631 # These two define the set of tags for this repository. tags
633 # maps tag name to node; tagtypes maps tag name to 'global' or
632 # maps tag name to node; tagtypes maps tag name to 'global' or
634 # 'local'. (Global tags are defined by .hgtags across all
633 # 'local'. (Global tags are defined by .hgtags across all
635 # heads, and local tags are defined in .hg/localtags.)
634 # heads, and local tags are defined in .hg/localtags.)
636 # They constitute the in-memory cache of tags.
635 # They constitute the in-memory cache of tags.
637 self.tags = self.tagtypes = None
636 self.tags = self.tagtypes = None
638
637
639 self.nodetagscache = self.tagslist = None
638 self.nodetagscache = self.tagslist = None
640
639
641 cache = tagscache()
640 cache = tagscache()
642 cache.tags, cache.tagtypes = self._findtags()
641 cache.tags, cache.tagtypes = self._findtags()
643
642
644 return cache
643 return cache
645
644
646 def tags(self):
645 def tags(self):
647 '''return a mapping of tag to node'''
646 '''return a mapping of tag to node'''
648 t = {}
647 t = {}
649 if self.changelog.filteredrevs:
648 if self.changelog.filteredrevs:
650 tags, tt = self._findtags()
649 tags, tt = self._findtags()
651 else:
650 else:
652 tags = self._tagscache.tags
651 tags = self._tagscache.tags
653 for k, v in tags.iteritems():
652 for k, v in tags.iteritems():
654 try:
653 try:
655 # ignore tags to unknown nodes
654 # ignore tags to unknown nodes
656 self.changelog.rev(v)
655 self.changelog.rev(v)
657 t[k] = v
656 t[k] = v
658 except (error.LookupError, ValueError):
657 except (error.LookupError, ValueError):
659 pass
658 pass
660 return t
659 return t
661
660
662 def _findtags(self):
661 def _findtags(self):
663 '''Do the hard work of finding tags. Return a pair of dicts
662 '''Do the hard work of finding tags. Return a pair of dicts
664 (tags, tagtypes) where tags maps tag name to node, and tagtypes
663 (tags, tagtypes) where tags maps tag name to node, and tagtypes
665 maps tag name to a string like \'global\' or \'local\'.
664 maps tag name to a string like \'global\' or \'local\'.
666 Subclasses or extensions are free to add their own tags, but
665 Subclasses or extensions are free to add their own tags, but
667 should be aware that the returned dicts will be retained for the
666 should be aware that the returned dicts will be retained for the
668 duration of the localrepo object.'''
667 duration of the localrepo object.'''
669
668
670 # XXX what tagtype should subclasses/extensions use? Currently
669 # XXX what tagtype should subclasses/extensions use? Currently
671 # mq and bookmarks add tags, but do not set the tagtype at all.
670 # mq and bookmarks add tags, but do not set the tagtype at all.
672 # Should each extension invent its own tag type? Should there
671 # Should each extension invent its own tag type? Should there
673 # be one tagtype for all such "virtual" tags? Or is the status
672 # be one tagtype for all such "virtual" tags? Or is the status
674 # quo fine?
673 # quo fine?
675
674
676 alltags = {} # map tag name to (node, hist)
675 alltags = {} # map tag name to (node, hist)
677 tagtypes = {}
676 tagtypes = {}
678
677
679 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
678 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
680 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
679 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
681
680
682 # Build the return dicts. Have to re-encode tag names because
681 # Build the return dicts. Have to re-encode tag names because
683 # the tags module always uses UTF-8 (in order not to lose info
682 # the tags module always uses UTF-8 (in order not to lose info
684 # writing to the cache), but the rest of Mercurial wants them in
683 # writing to the cache), but the rest of Mercurial wants them in
685 # local encoding.
684 # local encoding.
686 tags = {}
685 tags = {}
687 for (name, (node, hist)) in alltags.iteritems():
686 for (name, (node, hist)) in alltags.iteritems():
688 if node != nullid:
687 if node != nullid:
689 tags[encoding.tolocal(name)] = node
688 tags[encoding.tolocal(name)] = node
690 tags['tip'] = self.changelog.tip()
689 tags['tip'] = self.changelog.tip()
691 tagtypes = dict([(encoding.tolocal(name), value)
690 tagtypes = dict([(encoding.tolocal(name), value)
692 for (name, value) in tagtypes.iteritems()])
691 for (name, value) in tagtypes.iteritems()])
693 return (tags, tagtypes)
692 return (tags, tagtypes)
694
693
695 def tagtype(self, tagname):
694 def tagtype(self, tagname):
696 '''
695 '''
697 return the type of the given tag. result can be:
696 return the type of the given tag. result can be:
698
697
699 'local' : a local tag
698 'local' : a local tag
700 'global' : a global tag
699 'global' : a global tag
701 None : tag does not exist
700 None : tag does not exist
702 '''
701 '''
703
702
704 return self._tagscache.tagtypes.get(tagname)
703 return self._tagscache.tagtypes.get(tagname)
705
704
706 def tagslist(self):
705 def tagslist(self):
707 '''return a list of tags ordered by revision'''
706 '''return a list of tags ordered by revision'''
708 if not self._tagscache.tagslist:
707 if not self._tagscache.tagslist:
709 l = []
708 l = []
710 for t, n in self.tags().iteritems():
709 for t, n in self.tags().iteritems():
711 l.append((self.changelog.rev(n), t, n))
710 l.append((self.changelog.rev(n), t, n))
712 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
711 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
713
712
714 return self._tagscache.tagslist
713 return self._tagscache.tagslist
715
714
716 def nodetags(self, node):
715 def nodetags(self, node):
717 '''return the tags associated with a node'''
716 '''return the tags associated with a node'''
718 if not self._tagscache.nodetagscache:
717 if not self._tagscache.nodetagscache:
719 nodetagscache = {}
718 nodetagscache = {}
720 for t, n in self._tagscache.tags.iteritems():
719 for t, n in self._tagscache.tags.iteritems():
721 nodetagscache.setdefault(n, []).append(t)
720 nodetagscache.setdefault(n, []).append(t)
722 for tags in nodetagscache.itervalues():
721 for tags in nodetagscache.itervalues():
723 tags.sort()
722 tags.sort()
724 self._tagscache.nodetagscache = nodetagscache
723 self._tagscache.nodetagscache = nodetagscache
725 return self._tagscache.nodetagscache.get(node, [])
724 return self._tagscache.nodetagscache.get(node, [])
726
725
727 def nodebookmarks(self, node):
726 def nodebookmarks(self, node):
728 marks = []
727 marks = []
729 for bookmark, n in self._bookmarks.iteritems():
728 for bookmark, n in self._bookmarks.iteritems():
730 if n == node:
729 if n == node:
731 marks.append(bookmark)
730 marks.append(bookmark)
732 return sorted(marks)
731 return sorted(marks)
733
732
734 def branchmap(self):
733 def branchmap(self):
735 '''returns a dictionary {branch: [branchheads]} with branchheads
734 '''returns a dictionary {branch: [branchheads]} with branchheads
736 ordered by increasing revision number'''
735 ordered by increasing revision number'''
737 branchmap.updatecache(self)
736 branchmap.updatecache(self)
738 return self._branchcaches[self.filtername]
737 return self._branchcaches[self.filtername]
739
738
740 @unfilteredmethod
739 @unfilteredmethod
741 def revbranchcache(self):
740 def revbranchcache(self):
742 if not self._revbranchcache:
741 if not self._revbranchcache:
743 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
742 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
744 return self._revbranchcache
743 return self._revbranchcache
745
744
746 def branchtip(self, branch, ignoremissing=False):
745 def branchtip(self, branch, ignoremissing=False):
747 '''return the tip node for a given branch
746 '''return the tip node for a given branch
748
747
749 If ignoremissing is True, then this method will not raise an error.
748 If ignoremissing is True, then this method will not raise an error.
750 This is helpful for callers that only expect None for a missing branch
749 This is helpful for callers that only expect None for a missing branch
751 (e.g. namespace).
750 (e.g. namespace).
752
751
753 '''
752 '''
754 try:
753 try:
755 return self.branchmap().branchtip(branch)
754 return self.branchmap().branchtip(branch)
756 except KeyError:
755 except KeyError:
757 if not ignoremissing:
756 if not ignoremissing:
758 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
757 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
759 else:
758 else:
760 pass
759 pass
761
760
762 def lookup(self, key):
761 def lookup(self, key):
763 return self[key].node()
762 return self[key].node()
764
763
765 def lookupbranch(self, key, remote=None):
764 def lookupbranch(self, key, remote=None):
766 repo = remote or self
765 repo = remote or self
767 if key in repo.branchmap():
766 if key in repo.branchmap():
768 return key
767 return key
769
768
770 repo = (remote and remote.local()) and remote or self
769 repo = (remote and remote.local()) and remote or self
771 return repo[key].branch()
770 return repo[key].branch()
772
771
773 def known(self, nodes):
772 def known(self, nodes):
774 nm = self.changelog.nodemap
773 nm = self.changelog.nodemap
775 pc = self._phasecache
774 pc = self._phasecache
776 result = []
775 result = []
777 for n in nodes:
776 for n in nodes:
778 r = nm.get(n)
777 r = nm.get(n)
779 resp = not (r is None or pc.phase(self, r) >= phases.secret)
778 resp = not (r is None or pc.phase(self, r) >= phases.secret)
780 result.append(resp)
779 result.append(resp)
781 return result
780 return result
782
781
783 def local(self):
782 def local(self):
784 return self
783 return self
785
784
786 def cancopy(self):
785 def cancopy(self):
787 # so statichttprepo's override of local() works
786 # so statichttprepo's override of local() works
788 if not self.local():
787 if not self.local():
789 return False
788 return False
790 if not self.ui.configbool('phases', 'publish', True):
789 if not self.ui.configbool('phases', 'publish', True):
791 return True
790 return True
792 # if publishing we can't copy if there is filtered content
791 # if publishing we can't copy if there is filtered content
793 return not self.filtered('visible').changelog.filteredrevs
792 return not self.filtered('visible').changelog.filteredrevs
794
793
795 def shared(self):
794 def shared(self):
796 '''the type of shared repository (None if not shared)'''
795 '''the type of shared repository (None if not shared)'''
797 if self.sharedpath != self.path:
796 if self.sharedpath != self.path:
798 return 'store'
797 return 'store'
799 return None
798 return None
800
799
801 def join(self, f, *insidef):
800 def join(self, f, *insidef):
802 return self.vfs.join(os.path.join(f, *insidef))
801 return self.vfs.join(os.path.join(f, *insidef))
803
802
804 def wjoin(self, f, *insidef):
803 def wjoin(self, f, *insidef):
805 return self.vfs.reljoin(self.root, f, *insidef)
804 return self.vfs.reljoin(self.root, f, *insidef)
806
805
807 def file(self, f):
806 def file(self, f):
808 if f[0] == '/':
807 if f[0] == '/':
809 f = f[1:]
808 f = f[1:]
810 return filelog.filelog(self.svfs, f)
809 return filelog.filelog(self.svfs, f)
811
810
812 def changectx(self, changeid):
811 def changectx(self, changeid):
813 return self[changeid]
812 return self[changeid]
814
813
815 def parents(self, changeid=None):
814 def parents(self, changeid=None):
816 '''get list of changectxs for parents of changeid'''
815 '''get list of changectxs for parents of changeid'''
817 return self[changeid].parents()
816 return self[changeid].parents()
818
817
819 def setparents(self, p1, p2=nullid):
818 def setparents(self, p1, p2=nullid):
820 self.dirstate.beginparentchange()
819 self.dirstate.beginparentchange()
821 copies = self.dirstate.setparents(p1, p2)
820 copies = self.dirstate.setparents(p1, p2)
822 pctx = self[p1]
821 pctx = self[p1]
823 if copies:
822 if copies:
824 # Adjust copy records, the dirstate cannot do it, it
823 # Adjust copy records, the dirstate cannot do it, it
825 # requires access to parents manifests. Preserve them
824 # requires access to parents manifests. Preserve them
826 # only for entries added to first parent.
825 # only for entries added to first parent.
827 for f in copies:
826 for f in copies:
828 if f not in pctx and copies[f] in pctx:
827 if f not in pctx and copies[f] in pctx:
829 self.dirstate.copy(copies[f], f)
828 self.dirstate.copy(copies[f], f)
830 if p2 == nullid:
829 if p2 == nullid:
831 for f, s in sorted(self.dirstate.copies().items()):
830 for f, s in sorted(self.dirstate.copies().items()):
832 if f not in pctx and s not in pctx:
831 if f not in pctx and s not in pctx:
833 self.dirstate.copy(None, f)
832 self.dirstate.copy(None, f)
834 self.dirstate.endparentchange()
833 self.dirstate.endparentchange()
835
834
836 def filectx(self, path, changeid=None, fileid=None):
835 def filectx(self, path, changeid=None, fileid=None):
837 """changeid can be a changeset revision, node, or tag.
836 """changeid can be a changeset revision, node, or tag.
838 fileid can be a file revision or node."""
837 fileid can be a file revision or node."""
839 return context.filectx(self, path, changeid, fileid)
838 return context.filectx(self, path, changeid, fileid)
840
839
841 def getcwd(self):
840 def getcwd(self):
842 return self.dirstate.getcwd()
841 return self.dirstate.getcwd()
843
842
844 def pathto(self, f, cwd=None):
843 def pathto(self, f, cwd=None):
845 return self.dirstate.pathto(f, cwd)
844 return self.dirstate.pathto(f, cwd)
846
845
847 def wfile(self, f, mode='r'):
846 def wfile(self, f, mode='r'):
848 return self.wvfs(f, mode)
847 return self.wvfs(f, mode)
849
848
850 def _link(self, f):
849 def _link(self, f):
851 return self.wvfs.islink(f)
850 return self.wvfs.islink(f)
852
851
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

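    # Hook ordering around the transaction below: 'pretxnopen' fires
    # before the journal is written, 'pretxnclose' runs from the validator
    # just before the transaction is closed, and 'txnclose' is registered
    # as a finalizer and deferred until the repository lock is released.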
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                msg = 'transaction with no lock\n'
                if self.ui.tracebackflag:
                    util.debugstacktrace(msg, 1)
                else:
                    self.ui.write_err(msg)
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        self._transref = weakref.ref(tr)
        return tr

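    # The journal.* files listed below snapshot repository state when a
    # transaction opens; on a successful close, aftertrans() renames them
    # to their undo.* counterparts (see undofiles()), which is what makes
    # a later 'hg rollback' possible.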
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

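    # recover() below handles an *interrupted* transaction by rolling back
    # the 'journal' backup files, whereas rollback() undoes the last
    # *completed* transaction using the 'undo' files that were saved when
    # it closed.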
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

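    # Lock ordering convention: callers that need both locks must take
    # wlock() before lock(); acquiring them the other way around risks
    # deadlock, which is why wlock() below emits a devel warning when the
    # store lock is already held.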
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                msg = '"lock" taken before "wlock"\n'
                if self.ui.tracebackflag:
                    util.debugstacktrace(msg, 1)
                else:
                    self.ui.write_err(msg)
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

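    # When _filecommit() below records a rename, the copy source and its
    # revision are stored as 'copy'/'copyrev' metadata in the new filelog
    # revision, and the filelog parents are rewired so that later merges
    # can use the copy source as an ancestor (see the worked example in
    # the comments inside the method).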
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped by the
            # time the hook is released
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

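    # commitctx() below writes the three revlog layers bottom-up: filelog
    # revisions first, then the manifest, then the changelog entry that
    # links them together. linkrev is computed as len(self) because that
    # is the revision number the new changeset will receive.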
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # put the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract
                # anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

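    # between() below samples the first-parent chain at exponentially
    # growing distances (1, 2, 4, ...) between each (top, bottom) pair,
    # keeping the returned lists short; it backs the legacy 'between'
    # wire-protocol command used by old discovery.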
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

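    # Wire format parsed by stream_in() below: a numeric status line,
    # then a '<filecount> <bytecount>' header, then for each file a
    # '<name>\0<size>' line followed by exactly <size> bytes of raw store
    # data that are written straight into .hg/store.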
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

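For reference, the stream body parsed above is line-framed: one status-code line, one 'total_files total_bytes' line, then for each file a 'name\0size' header followed by exactly size raw bytes. A minimal writer for that framing, as a hedged sketch inferred from the parser (not the actual server implementation):

def writestream(fp, files):
    # files: list of (name, data) pairs; mirrors the framing stream_in() parses
    fp.write('0\n')                                   # status code: 0 means OK
    total = sum(len(data) for name, data in files)
    fp.write('%d %d\n' % (len(files), total))
    for name, data in files:
        fp.write('%s\0%d\n' % (name, len(data)))      # int() tolerates the '\n'
        fp.write(data)
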
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

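The streamreqs check above is plain set arithmetic: streaming is only used when every format requirement advertised by the server is one this client supports. A small illustration with one real and one hypothetical requirement name ('fancyformat' is made up for the example):

supported = set(['revlogv1', 'generaldelta'])

streamreqs = set('revlogv1,generaldelta'.split(','))
print not streamreqs - supported   # True: safe to stream

streamreqs = set('revlogv1,fancyformat'.split(','))
print not streamreqs - supported   # False: fall back to a regular pull
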
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,697 +1,697 b''
# manifest.py - manifest revision class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import mdiff, parsers, error, revlog, util, scmutil
import array, struct

propertycache = util.propertycache

def _parse(data):
    """Generates (path, node, flags) tuples from a manifest text"""
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for l in data.splitlines():
        if prev is not None and prev > l:
            raise ValueError('Manifest lines not in sorted order.')
        prev = l
        f, n = l.split('\0')
        if len(n) > 40:
            yield f, revlog.bin(n[:40]), n[40:]
        else:
            yield f, revlog.bin(n), ''

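Each manifest line is 'path\0<40 hex nodeid><optional flags>\n', and lines must be sorted by path; that is all _parse() enforces. A tiny worked example against this module's _parse(), with the hashes shortened to a repeated hex digit for readability:

data = ("bar/baz.txt\0" + "a" * 40 + "\n"
        "foo.py\0" + "b" * 40 + "x\n")   # trailing 'x' flag marks an executable

for path, node, flags in _parse(data):
    print path, flags   # node is the binary form of the 40-hex digest
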
def _text(it):
    """Given an iterator over (path, node, flags) tuples, returns a manifest
    text"""
    files = []
    lines = []
    _hex = revlog.hex
    for f, n, fl in it:
        files.append(f)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (f, _hex(n), fl))

    _checkforbidden(files)
    return ''.join(lines)

class _lazymanifest(dict):
    """This is the pure implementation of lazymanifest.

    It has not been optimized *at all* and is not lazy.
    """

    def __init__(self, data):
        dict.__init__(self)
        for f, n, fl in _parse(data):
            self[f] = n, fl

    def __setitem__(self, k, v):
        node, flag = v
        assert node is not None
        if len(node) > 21:
            node = node[:21] # match c implementation behavior
        dict.__setitem__(self, k, (node, flag))

    def __iter__(self):
        return iter(sorted(dict.keys(self)))

    def iterkeys(self):
        return iter(sorted(dict.keys(self)))

    def iterentries(self):
        return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))

    def copy(self):
        c = _lazymanifest('')
        c.update(self)
        return c

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        diff = {}

        for fn, e1 in self.iteritems():
            if fn not in m2:
                diff[fn] = e1, (None, '')
            else:
                e2 = m2[fn]
                if e1 != e2:
                    diff[fn] = e1, e2
                elif clean:
                    diff[fn] = None

        for fn, e2 in m2.iteritems():
            if fn not in self:
                diff[fn] = (None, ''), e2

        return diff

    def filtercopy(self, filterfn):
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        return _text(self.iterentries())

try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass

class manifestdict(object):
    def __init__(self, data=''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        return self._lm[key][0]

    def find(self, key):
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __setitem__(self, key, node):
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def _intersectfiles(self, files):
        '''make a new lazymanifest with the intersection of self with files

        The algorithm assumes that files is much smaller than self.'''
        ret = manifestdict()
        lm = self._lm
        for fn in files:
            if fn in lm:
                ret._lm[fn] = self._lm[fn]
        return ret

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set(self)
        files.difference_update(m2)
        return files

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        files = match.files()
        if (len(files) < 100 and (match.isexact() or
            (not match.anypats() and util.all(fn in self for fn in files)))):
            return self._intersectfiles(files)

        lm = manifestdict('')
        lm._lm = self._lm.filtercopy(match)
        return lm

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        return self._lm.diff(m2._lm, clean)

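A short usage sketch of the diff() result shape described above, with abbreviated hex nodeids:

m1 = manifestdict("a.txt\0" + "1" * 40 + "\n")
m2 = manifestdict("a.txt\0" + "2" * 40 + "\n"
                  "b.txt\0" + "3" * 40 + "\n")
d = m1.diff(m2)
# d["a.txt"] == ((revlog.bin("1" * 40), ''), (revlog.bin("2" * 40), ''))  changed
# d["b.txt"] == ((None, ''), (revlog.bin("3" * 40), ''))                  only in m2
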
    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict('')
        c._lm = self._lm.copy()
        return c

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def text(self):
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        # start with a readonly loop that finds the offset of
        # each line and creates the deltas
        for f, todelete in changes:
            # start, end will either bound the existing item or give
            # the insert point
            start, end = _msearch(addbuf, f, start)
            if not todelete:
                h, fl = self._lm[f]
                l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
            else:
                if start == end:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                        _("failed to remove %s from manifest") % f)
                l = ""
            if dstart is not None and dstart <= start and dend >= start:
                if dend < end:
                    dend = end
                if l:
                    dline.append(l)
            else:
                if dstart is not None:
                    delta.append([dstart, dend, "".join(dline)])
                dstart = start
                dend = end
                dline = [l]

        if dstart is not None:
            delta.append([dstart, dend, "".join(dline)])
        # apply the delta to the base, and get a delta for addrevision
        deltatext, arraytext = _addlistdelta(base, delta)
        return arraytext, deltatext

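The delta built above is a list of (start, end, replacement) ranges over the base text; _addlistdelta below packs each as a '>lll' header plus content for the revlog. A minimal sketch of how such ranges patch a base string, assuming the ranges are ascending and non-overlapping:

def applyranges(base, ranges):
    # splice each replacement over base[start:end], keeping the text in between
    out, pos = [], 0
    for start, end, content in ranges:
        out.append(base[pos:start])
        out.append(content)
        pos = end
    out.append(base[pos:])
    return ''.join(out)

# applyranges('aaabbbccc', [(3, 6, 'XY')]) -> 'aaaXYccc'
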
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer or a string
    s is a string'''
    def advance(i, c):
        while i < lenm and m[i] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1] != '\n':
            start -= 1
        end = advance(start, '\0')
        if m[start:end] < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)

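Because manifest lines are sorted and each nodeid is a fixed 40 hex bytes after the NUL, _msearch() can bisect the raw text directly. The same lookup expressed with the bisect module over split lines, as a sketch (simpler, but it allocates, which is exactly what the raw-buffer version avoids):

import bisect

def findline(text, path):
    # return the full 'path\0node...' line for path, or None if absent
    lines = text.splitlines(True)
    keys = [l.split('\0', 1)[0] for l in lines]
    i = bisect.bisect_left(keys, path)
    if i < len(keys) and keys[i] == path:
        return lines[i]
    return None
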
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if '\n' in f or '\r' in f:
            raise error.RevlogError(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)


# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = array.array('c')

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += array.array('c', content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist

def _splittopdir(f):
    if '/' in f:
        dir, subpath = f.split('/', 1)
        return dir + '/', subpath
    else:
        return '', f

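treemanifest keys subdirectories by their first path component with a trailing slash, so _splittopdir() drives all of the per-component recursion in the class below:

assert _splittopdir('foo/bar/baz') == ('foo/', 'bar/baz')
assert _splittopdir('toplevel.txt') == ('', 'toplevel.txt')
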
class treemanifest(object):
    def __init__(self, dir='', text=''):
        self._dir = dir
        self._dirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        for f, n, fl in _parse(text):
            self[f] = n
            if fl:
                self.setflag(f, fl)

    def _subpath(self, path):
        return self._dir + path

    def __len__(self):
        size = len(self._files)
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def _isempty(self):
        return (not self._files and (not self._dirs or
                util.all(m._isempty() for m in self._dirs.values())))

    def __str__(self):
        return '<treemanifest dir=%s>' % self._dir

    def iteritems(self):
        for p, n in sorted(self._dirs.items() + self._files.items()):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in n.iteritems():
                    yield f, sn

    def iterkeys(self):
        for p in sorted(self._dirs.keys() + self._files.keys()):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p].iterkeys():
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return False
            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return ''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._dirs:
                return ''
            return self._flags.get(f, '')

    def find(self, f):
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, '')

    def __delitem__(self, f):
        dir, subpath = _splittopdir(f)
        if dir:
            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]

    def __setitem__(self, f, n):
        assert n is not None
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21] # to match manifestdict's behavior

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags

    def copy(self):
        copy = treemanifest(self._dir)
        for d in self._dirs:
            copy._dirs[d] = self._dirs[d].copy()
        copy._files = dict.copy(self._files)
        copy._flags = dict.copy(self._flags)
        return copy

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set()
        def _filesnotin(t1, t2):
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files.iterkeys():
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return scmutil.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        topdir, subdir = _splittopdir(dir)
        if topdir:
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        return (dir + '/') in self._dirs

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        ret = treemanifest(self._dir)

        for fn in self._files:
            fullp = self._subpath(fn)
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        for dir, subm in self._dirs.iteritems():
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        return ret

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        flags = self.flags
        return _text((f, self[f], flags(f)) for f in self.keys())

class manifest(revlog.revlog):
    def __init__(self, opener):
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('usetreemanifest', usetreemanifest)
-            usemanifestv2 = opts.get('usemanifestv2', usemanifestv2)
+            usemanifestv2 = opts.get('manifestv2', usemanifestv2)
        self._mancache = util.lrucachedict(cachesize)
        revlog.revlog.__init__(self, opener, "00manifest.i")
        self._usetreemanifest = usetreemanifest
        self._usemanifestv2 = usemanifestv2

    def _newmanifest(self, data=''):
        if self._usetreemanifest:
            return treemanifest('', data)
        return manifestdict(data)

    def _slowreaddelta(self, node):
        r0 = self.deltaparent(self.rev(node))
        m0 = self.read(self.node(r0))
        m1 = self.read(node)
        md = self._newmanifest()
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readdelta(self, node):
        if self._usemanifestv2:
            return self._slowreaddelta(node)
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return self._newmanifest(d)

    def readfast(self, node):
        '''use the faster of readdelta or read'''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readdelta(node)
        return self.read(node)

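readfast() prefers the delta only when the stored delta base is one of the revision's actual parents; the delta then describes exactly the changes since that parent, so applying it is cheaper than reading the full text. The decision, isolated as a sketch (nullrev is -1 in revlog):

def preferdelta(deltaparent, parentrevs, nullrev=-1):
    # a stored delta is only a win when its base is a real parent revision
    return deltaparent != nullrev and deltaparent in parentrevs

# preferdelta(5, (5, -1))  -> True: read the delta
# preferdelta(3, (5, -1))  -> False: read the full text
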
    def read(self, node):
        if node == revlog.nullid:
            return self._newmanifest() # don't upset local cache
        if node in self._mancache:
            return self._mancache[node][0]
        text = self.revision(node)
        arraytext = array.array('c', text)
        m = self._newmanifest(text)
        self._mancache[node] = (m, arraytext)
        return m

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        m = self.read(node)
        try:
            return m.find(f)
        except KeyError:
            return None, None

    def add(self, m, transaction, link, p1, p2, added, removed):
        if (p1 in self._mancache and not self._usetreemanifest
            and not self._usemanifestv2):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one list for sorting
            work = [(x, False) for x in added]
            work.extend((x, True) for x in removed)
            # this could use heapq.merge() (from Python 2.6+) or equivalent
            # since the lists are already sorted
            work.sort()

            arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            text = m.text()
            arraytext = array.array('c', text)
            cachedelta = None

        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self._mancache[n] = (m, arraytext)

        return n
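
The comment in add() notes that the two change lists are already sorted, so heapq.merge() could replace the sort; a minimal sketch of that alternative, with hypothetical paths:

import heapq

added = ['a.txt', 'c.txt']      # sorted paths
removed = ['b.txt']

work = list(heapq.merge(((x, False) for x in added),
                        ((x, True) for x in removed)))
# [('a.txt', False), ('b.txt', True), ('c.txt', False)]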