##// END OF EJS Templates
treemanifest: add configuration for using treemanifest type...
Martin von Zweigbergk -
r24402:c2287f20 default
parent child Browse files
Show More
@@ -1,1920 +1,1923
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
class repofilecache(filecache):
    """A filecache that always operates on the unfiltered repository.

    Filecache-driven invalidation on a repo is implemented for logic that
    must be unfiltered, so every descriptor access is redirected to
    repo.unfiltered() before delegating to the base filecache.
    """

    def __get__(self, repo, type=None):
        # read through to the unfiltered view
        return super(repofilecache, self).__get__(repo.unfiltered(), type)

    def __set__(self, repo, value):
        # write through to the unfiltered view
        return super(repofilecache, self).__set__(repo.unfiltered(), value)

    def __delete__(self, repo):
        # invalidate on the unfiltered view
        return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
class storecache(repofilecache):
    """A repofilecache for files that live in the store (.hg/store).

    Only the path-joining differs from repofilecache: names are resolved
    with the repo's store join instead of the plain .hg join.
    """

    def join(self, obj, fname):
        # resolve fname relative to the store directory
        return obj.sjoin(fname)
40
40
class unfilteredpropertycache(propertycache):
    """A propertycache whose value lives only on the unfiltered repo.

    Accessing the property on a filtered view transparently fetches the
    value cached on (or computed for) the unfiltered repository.
    """

    def __get__(self, repo, type=None):
        unfiltered = repo.unfiltered()
        if unfiltered is not repo:
            # filtered view: delegate to the attribute on the unfiltered repo
            return getattr(unfiltered, self.name)
        # unfiltered repo: compute and cache normally
        return super(unfilteredpropertycache, self).__get__(unfiltered)
49
49
class filteredpropertycache(propertycache):
    """A propertycache that must respect repo filtering.

    The cached value is stored on the exact (possibly filtered) repo object
    the access went through, so each filtered view keeps its own value.
    """

    def cachevalue(self, obj, value):
        # bypass any __setattr__ override; store on this view only
        object.__setattr__(obj, self.name, value)
55
55
56
56
def hasunfilteredcache(repo, name):
    """Return True if the unfiltered repo has a cached value for <name>.

    Checks the instance __dict__ of the unfiltered repository, which is
    where unfilteredpropertycache stores computed values.
    """
    unfiltered = repo.unfiltered()
    return name in vars(unfiltered)
60
60
def unfilteredmethod(orig):
    """Decorator: always run the wrapped method on the unfiltered repo.

    The returned wrapper swaps the (possibly filtered) receiver for its
    unfiltered view before calling through to the original method.
    """
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
66
66
# capabilities advertised by a modern local peer
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# legacy peers additionally understand changegroupsubset
legacycaps = moderncaps | set(['changegroupsubset'])
70
70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API

    Most calls are thin delegations to the underlying repository, which is
    always the 'served' filtered view of the repo this peer was built from.
    '''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # peers only ever see the 'served' view of the repository
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        # 'except X as e' (PEP 3110, valid since py2.6) instead of the
        # legacy comma form, which is a syntax error under python 3
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
155
155
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set instead of the modern one
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        # legacy wire call: delegate straight to the repository
        return self._repo.branches(nodes)

    def between(self, pairs):
        # legacy wire call: delegate straight to the repository
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        # legacy bundle generation
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        # legacy partial bundle generation
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
174
174
class localrepository(object):
    """A repository backed by the local filesystem (continued below)."""

    # on-disk storage formats this class knows how to read/write
    supportedformats = set(('revlogv1', 'generaldelta'))
    # full set of supported requirements (formats + layout features)
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that translate into revlog opener options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # default requirements for newly created repositories
    requirements = ['revlogv1']
    # name of the active repoview filter; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
187
187
188 def _baserequirements(self, create):
188 def _baserequirements(self, create):
189 return self.requirements[:]
189 return self.requirements[:]
190
190
191 def __init__(self, baseui, path=None, create=False):
191 def __init__(self, baseui, path=None, create=False):
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 self.wopener = self.wvfs
193 self.wopener = self.wvfs
194 self.root = self.wvfs.base
194 self.root = self.wvfs.base
195 self.path = self.wvfs.join(".hg")
195 self.path = self.wvfs.join(".hg")
196 self.origroot = path
196 self.origroot = path
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 self.vfs = scmutil.vfs(self.path)
198 self.vfs = scmutil.vfs(self.path)
199 self.opener = self.vfs
199 self.opener = self.vfs
200 self.baseui = baseui
200 self.baseui = baseui
201 self.ui = baseui.copy()
201 self.ui = baseui.copy()
202 self.ui.copy = baseui.copy # prevent copying repo configuration
202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 # A list of callback to shape the phase if no data were found.
203 # A list of callback to shape the phase if no data were found.
204 # Callback are in the form: func(repo, roots) --> processed root.
204 # Callback are in the form: func(repo, roots) --> processed root.
205 # This list it to be filled by extension during repo setup
205 # This list it to be filled by extension during repo setup
206 self._phasedefaults = []
206 self._phasedefaults = []
207 try:
207 try:
208 self.ui.readconfig(self.join("hgrc"), self.root)
208 self.ui.readconfig(self.join("hgrc"), self.root)
209 extensions.loadall(self.ui)
209 extensions.loadall(self.ui)
210 except IOError:
210 except IOError:
211 pass
211 pass
212
212
213 if self.featuresetupfuncs:
213 if self.featuresetupfuncs:
214 self.supported = set(self._basesupported) # use private copy
214 self.supported = set(self._basesupported) # use private copy
215 extmods = set(m.__name__ for n, m
215 extmods = set(m.__name__ for n, m
216 in extensions.extensions(self.ui))
216 in extensions.extensions(self.ui))
217 for setupfunc in self.featuresetupfuncs:
217 for setupfunc in self.featuresetupfuncs:
218 if setupfunc.__module__ in extmods:
218 if setupfunc.__module__ in extmods:
219 setupfunc(self.ui, self.supported)
219 setupfunc(self.ui, self.supported)
220 else:
220 else:
221 self.supported = self._basesupported
221 self.supported = self._basesupported
222
222
223 if not self.vfs.isdir():
223 if not self.vfs.isdir():
224 if create:
224 if create:
225 if not self.wvfs.exists():
225 if not self.wvfs.exists():
226 self.wvfs.makedirs()
226 self.wvfs.makedirs()
227 self.vfs.makedir(notindexed=True)
227 self.vfs.makedir(notindexed=True)
228 requirements = self._baserequirements(create)
228 requirements = self._baserequirements(create)
229 if self.ui.configbool('format', 'usestore', True):
229 if self.ui.configbool('format', 'usestore', True):
230 self.vfs.mkdir("store")
230 self.vfs.mkdir("store")
231 requirements.append("store")
231 requirements.append("store")
232 if self.ui.configbool('format', 'usefncache', True):
232 if self.ui.configbool('format', 'usefncache', True):
233 requirements.append("fncache")
233 requirements.append("fncache")
234 if self.ui.configbool('format', 'dotencode', True):
234 if self.ui.configbool('format', 'dotencode', True):
235 requirements.append('dotencode')
235 requirements.append('dotencode')
236 # create an invalid changelog
236 # create an invalid changelog
237 self.vfs.append(
237 self.vfs.append(
238 "00changelog.i",
238 "00changelog.i",
239 '\0\0\0\2' # represents revlogv2
239 '\0\0\0\2' # represents revlogv2
240 ' dummy changelog to prevent using the old repo layout'
240 ' dummy changelog to prevent using the old repo layout'
241 )
241 )
242 if self.ui.configbool('format', 'generaldelta', False):
242 if self.ui.configbool('format', 'generaldelta', False):
243 requirements.append("generaldelta")
243 requirements.append("generaldelta")
244 requirements = set(requirements)
244 requirements = set(requirements)
245 else:
245 else:
246 raise error.RepoError(_("repository %s not found") % path)
246 raise error.RepoError(_("repository %s not found") % path)
247 elif create:
247 elif create:
248 raise error.RepoError(_("repository %s already exists") % path)
248 raise error.RepoError(_("repository %s already exists") % path)
249 else:
249 else:
250 try:
250 try:
251 requirements = scmutil.readrequires(self.vfs, self.supported)
251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 except IOError, inst:
252 except IOError, inst:
253 if inst.errno != errno.ENOENT:
253 if inst.errno != errno.ENOENT:
254 raise
254 raise
255 requirements = set()
255 requirements = set()
256
256
257 self.sharedpath = self.path
257 self.sharedpath = self.path
258 try:
258 try:
259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 realpath=True)
260 realpath=True)
261 s = vfs.base
261 s = vfs.base
262 if not vfs.exists():
262 if not vfs.exists():
263 raise error.RepoError(
263 raise error.RepoError(
264 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 self.sharedpath = s
265 self.sharedpath = s
266 except IOError, inst:
266 except IOError, inst:
267 if inst.errno != errno.ENOENT:
267 if inst.errno != errno.ENOENT:
268 raise
268 raise
269
269
270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 self.spath = self.store.path
271 self.spath = self.store.path
272 self.svfs = self.store.vfs
272 self.svfs = self.store.vfs
273 self.sopener = self.svfs
273 self.sopener = self.svfs
274 self.sjoin = self.store.join
274 self.sjoin = self.store.join
275 self.vfs.createmode = self.store.createmode
275 self.vfs.createmode = self.store.createmode
276 self._applyrequirements(requirements)
276 self._applyrequirements(requirements)
277 if create:
277 if create:
278 self._writerequirements()
278 self._writerequirements()
279
279
280
280
281 self._branchcaches = {}
281 self._branchcaches = {}
282 self._revbranchcache = None
282 self._revbranchcache = None
283 self.filterpats = {}
283 self.filterpats = {}
284 self._datafilters = {}
284 self._datafilters = {}
285 self._transref = self._lockref = self._wlockref = None
285 self._transref = self._lockref = self._wlockref = None
286
286
287 # A cache for various files under .hg/ that tracks file changes,
287 # A cache for various files under .hg/ that tracks file changes,
288 # (used by the filecache decorator)
288 # (used by the filecache decorator)
289 #
289 #
290 # Maps a property name to its util.filecacheentry
290 # Maps a property name to its util.filecacheentry
291 self._filecache = {}
291 self._filecache = {}
292
292
293 # hold sets of revision to be filtered
293 # hold sets of revision to be filtered
294 # should be cleared when something might have changed the filter value:
294 # should be cleared when something might have changed the filter value:
295 # - new changesets,
295 # - new changesets,
296 # - phase change,
296 # - phase change,
297 # - new obsolescence marker,
297 # - new obsolescence marker,
298 # - working directory parent change,
298 # - working directory parent change,
299 # - bookmark changes
299 # - bookmark changes
300 self.filteredrevcache = {}
300 self.filteredrevcache = {}
301
301
302 # generic mapping between names and nodes
302 # generic mapping between names and nodes
303 self.names = namespaces.namespaces()
303 self.names = namespaces.namespaces()
304
304
305 def close(self):
305 def close(self):
306 self._writecaches()
306 self._writecaches()
307
307
308 def _writecaches(self):
308 def _writecaches(self):
309 if self._revbranchcache:
309 if self._revbranchcache:
310 self._revbranchcache.write()
310 self._revbranchcache.write()
311
311
312 def _restrictcapabilities(self, caps):
312 def _restrictcapabilities(self, caps):
313 # bundle2 is not ready for prime time, drop it unless explicitly
313 # bundle2 is not ready for prime time, drop it unless explicitly
314 # required by the tests (or some brave tester)
314 # required by the tests (or some brave tester)
315 if self.ui.configbool('experimental', 'bundle2-exp', False):
315 if self.ui.configbool('experimental', 'bundle2-exp', False):
316 caps = set(caps)
316 caps = set(caps)
317 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
317 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
318 caps.add('bundle2-exp=' + urllib.quote(capsblob))
318 caps.add('bundle2-exp=' + urllib.quote(capsblob))
319 return caps
319 return caps
320
320
321 def _applyrequirements(self, requirements):
321 def _applyrequirements(self, requirements):
322 self.requirements = requirements
322 self.requirements = requirements
323 self.svfs.options = dict((r, 1) for r in requirements
323 self.svfs.options = dict((r, 1) for r in requirements
324 if r in self.openerreqs)
324 if r in self.openerreqs)
325 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
325 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
326 if chunkcachesize is not None:
326 if chunkcachesize is not None:
327 self.svfs.options['chunkcachesize'] = chunkcachesize
327 self.svfs.options['chunkcachesize'] = chunkcachesize
328 maxchainlen = self.ui.configint('format', 'maxchainlen')
328 maxchainlen = self.ui.configint('format', 'maxchainlen')
329 if maxchainlen is not None:
329 if maxchainlen is not None:
330 self.svfs.options['maxchainlen'] = maxchainlen
330 self.svfs.options['maxchainlen'] = maxchainlen
331 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
331 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
332 if manifestcachesize is not None:
332 if manifestcachesize is not None:
333 self.svfs.options['manifestcachesize'] = manifestcachesize
333 self.svfs.options['manifestcachesize'] = manifestcachesize
334 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
335 if usetreemanifest is not None:
336 self.svfs.options['usetreemanifest'] = usetreemanifest
334
337
335 def _writerequirements(self):
338 def _writerequirements(self):
336 reqfile = self.vfs("requires", "w")
339 reqfile = self.vfs("requires", "w")
337 for r in sorted(self.requirements):
340 for r in sorted(self.requirements):
338 reqfile.write("%s\n" % r)
341 reqfile.write("%s\n" % r)
339 reqfile.close()
342 reqfile.close()
340
343
341 def _checknested(self, path):
344 def _checknested(self, path):
342 """Determine if path is a legal nested repository."""
345 """Determine if path is a legal nested repository."""
343 if not path.startswith(self.root):
346 if not path.startswith(self.root):
344 return False
347 return False
345 subpath = path[len(self.root) + 1:]
348 subpath = path[len(self.root) + 1:]
346 normsubpath = util.pconvert(subpath)
349 normsubpath = util.pconvert(subpath)
347
350
348 # XXX: Checking against the current working copy is wrong in
351 # XXX: Checking against the current working copy is wrong in
349 # the sense that it can reject things like
352 # the sense that it can reject things like
350 #
353 #
351 # $ hg cat -r 10 sub/x.txt
354 # $ hg cat -r 10 sub/x.txt
352 #
355 #
353 # if sub/ is no longer a subrepository in the working copy
356 # if sub/ is no longer a subrepository in the working copy
354 # parent revision.
357 # parent revision.
355 #
358 #
356 # However, it can of course also allow things that would have
359 # However, it can of course also allow things that would have
357 # been rejected before, such as the above cat command if sub/
360 # been rejected before, such as the above cat command if sub/
358 # is a subrepository now, but was a normal directory before.
361 # is a subrepository now, but was a normal directory before.
359 # The old path auditor would have rejected by mistake since it
362 # The old path auditor would have rejected by mistake since it
360 # panics when it sees sub/.hg/.
363 # panics when it sees sub/.hg/.
361 #
364 #
362 # All in all, checking against the working copy seems sensible
365 # All in all, checking against the working copy seems sensible
363 # since we want to prevent access to nested repositories on
366 # since we want to prevent access to nested repositories on
364 # the filesystem *now*.
367 # the filesystem *now*.
365 ctx = self[None]
368 ctx = self[None]
366 parts = util.splitpath(subpath)
369 parts = util.splitpath(subpath)
367 while parts:
370 while parts:
368 prefix = '/'.join(parts)
371 prefix = '/'.join(parts)
369 if prefix in ctx.substate:
372 if prefix in ctx.substate:
370 if prefix == normsubpath:
373 if prefix == normsubpath:
371 return True
374 return True
372 else:
375 else:
373 sub = ctx.sub(prefix)
376 sub = ctx.sub(prefix)
374 return sub.checknested(subpath[len(prefix) + 1:])
377 return sub.checknested(subpath[len(prefix) + 1:])
375 else:
378 else:
376 parts.pop()
379 parts.pop()
377 return False
380 return False
378
381
379 def peer(self):
382 def peer(self):
380 return localpeer(self) # not cached to avoid reference cycle
383 return localpeer(self) # not cached to avoid reference cycle
381
384
382 def unfiltered(self):
385 def unfiltered(self):
383 """Return unfiltered version of the repository
386 """Return unfiltered version of the repository
384
387
385 Intended to be overwritten by filtered repo."""
388 Intended to be overwritten by filtered repo."""
386 return self
389 return self
387
390
388 def filtered(self, name):
391 def filtered(self, name):
389 """Return a filtered version of a repository"""
392 """Return a filtered version of a repository"""
390 # build a new class with the mixin and the current class
393 # build a new class with the mixin and the current class
391 # (possibly subclass of the repo)
394 # (possibly subclass of the repo)
392 class proxycls(repoview.repoview, self.unfiltered().__class__):
395 class proxycls(repoview.repoview, self.unfiltered().__class__):
393 pass
396 pass
394 return proxycls(self, name)
397 return proxycls(self, name)
395
398
396 @repofilecache('bookmarks')
399 @repofilecache('bookmarks')
397 def _bookmarks(self):
400 def _bookmarks(self):
398 return bookmarks.bmstore(self)
401 return bookmarks.bmstore(self)
399
402
400 @repofilecache('bookmarks.current')
403 @repofilecache('bookmarks.current')
401 def _bookmarkcurrent(self):
404 def _bookmarkcurrent(self):
402 return bookmarks.readcurrent(self)
405 return bookmarks.readcurrent(self)
403
406
404 def bookmarkheads(self, bookmark):
407 def bookmarkheads(self, bookmark):
405 name = bookmark.split('@', 1)[0]
408 name = bookmark.split('@', 1)[0]
406 heads = []
409 heads = []
407 for mark, n in self._bookmarks.iteritems():
410 for mark, n in self._bookmarks.iteritems():
408 if mark.split('@', 1)[0] == name:
411 if mark.split('@', 1)[0] == name:
409 heads.append(n)
412 heads.append(n)
410 return heads
413 return heads
411
414
412 @storecache('phaseroots')
415 @storecache('phaseroots')
413 def _phasecache(self):
416 def _phasecache(self):
414 return phases.phasecache(self, self._phasedefaults)
417 return phases.phasecache(self, self._phasedefaults)
415
418
416 @storecache('obsstore')
419 @storecache('obsstore')
417 def obsstore(self):
420 def obsstore(self):
418 # read default format for new obsstore.
421 # read default format for new obsstore.
419 defaultformat = self.ui.configint('format', 'obsstore-version', None)
422 defaultformat = self.ui.configint('format', 'obsstore-version', None)
420 # rely on obsstore class default when possible.
423 # rely on obsstore class default when possible.
421 kwargs = {}
424 kwargs = {}
422 if defaultformat is not None:
425 if defaultformat is not None:
423 kwargs['defaultformat'] = defaultformat
426 kwargs['defaultformat'] = defaultformat
424 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
427 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
425 store = obsolete.obsstore(self.svfs, readonly=readonly,
428 store = obsolete.obsstore(self.svfs, readonly=readonly,
426 **kwargs)
429 **kwargs)
427 if store and readonly:
430 if store and readonly:
428 # message is rare enough to not be translated
431 # message is rare enough to not be translated
429 msg = 'obsolete feature not enabled but %i markers found!\n'
432 msg = 'obsolete feature not enabled but %i markers found!\n'
430 self.ui.warn(msg % len(list(store)))
433 self.ui.warn(msg % len(list(store)))
431 return store
434 return store
432
435
433 @storecache('00changelog.i')
436 @storecache('00changelog.i')
434 def changelog(self):
437 def changelog(self):
435 c = changelog.changelog(self.svfs)
438 c = changelog.changelog(self.svfs)
436 if 'HG_PENDING' in os.environ:
439 if 'HG_PENDING' in os.environ:
437 p = os.environ['HG_PENDING']
440 p = os.environ['HG_PENDING']
438 if p.startswith(self.root):
441 if p.startswith(self.root):
439 c.readpending('00changelog.i.a')
442 c.readpending('00changelog.i.a')
440 return c
443 return c
441
444
442 @storecache('00manifest.i')
445 @storecache('00manifest.i')
443 def manifest(self):
446 def manifest(self):
444 return manifest.manifest(self.svfs)
447 return manifest.manifest(self.svfs)
445
448
446 @repofilecache('dirstate')
449 @repofilecache('dirstate')
447 def dirstate(self):
450 def dirstate(self):
448 warned = [0]
451 warned = [0]
449 def validate(node):
452 def validate(node):
450 try:
453 try:
451 self.changelog.rev(node)
454 self.changelog.rev(node)
452 return node
455 return node
453 except error.LookupError:
456 except error.LookupError:
454 if not warned[0]:
457 if not warned[0]:
455 warned[0] = True
458 warned[0] = True
456 self.ui.warn(_("warning: ignoring unknown"
459 self.ui.warn(_("warning: ignoring unknown"
457 " working parent %s!\n") % short(node))
460 " working parent %s!\n") % short(node))
458 return nullid
461 return nullid
459
462
460 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
463 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
461
464
462 def __getitem__(self, changeid):
465 def __getitem__(self, changeid):
463 if changeid is None:
466 if changeid is None:
464 return context.workingctx(self)
467 return context.workingctx(self)
465 if isinstance(changeid, slice):
468 if isinstance(changeid, slice):
466 return [context.changectx(self, i)
469 return [context.changectx(self, i)
467 for i in xrange(*changeid.indices(len(self)))
470 for i in xrange(*changeid.indices(len(self)))
468 if i not in self.changelog.filteredrevs]
471 if i not in self.changelog.filteredrevs]
469 return context.changectx(self, changeid)
472 return context.changectx(self, changeid)
470
473
471 def __contains__(self, changeid):
474 def __contains__(self, changeid):
472 try:
475 try:
473 self[changeid]
476 self[changeid]
474 return True
477 return True
475 except error.RepoLookupError:
478 except error.RepoLookupError:
476 return False
479 return False
477
480
def __nonzero__(self):
    # A repository object is always truthy, even when it is empty.
    return True

def __len__(self):
    # The number of changesets equals the changelog's length.
    return len(self.changelog)

def __iter__(self):
    # Iterate over revision numbers, delegating to the changelog.
    return iter(self.changelog)
486
489
def revs(self, expr, *args):
    '''Return a list of revisions matching the given revset'''
    spec = revset.formatspec(expr, *args)
    matcher = revset.match(None, spec)
    return matcher(self)
492
495
def set(self, expr, *args):
    '''Yield a context for each revision matching the revset *expr*,
    after argument substitution via revset.formatspec.'''
    for rev in self.revs(expr, *args):
        yield self[rev]
500
503
def url(self):
    """Return the 'file:'-style URL of this repository's root."""
    return 'file:' + self.root

def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    Convenience wrapper around hook.hook(); extensions normally only
    call this when they register a custom hook or replace code that is
    expected to fire one.
    """
    return hook.hook(self.ui, self, name, throw, **args)
512
515
@unfilteredmethod
def _tag(self, names, node, message, local, user, date, extra=None,
         editor=False):
    """Record one or more tags for *node*.

    names may be a string or an iterable of strings; local tags go to
    .hg/localtags, global ones are committed to .hgtags. Returns the
    node of the tagging commit (None for local tags).

    Fix: the original used a mutable default ``extra={}``, which is a
    single dict shared by every call and could leak entries between
    callers; replaced by the None-sentinel idiom (backward compatible).
    """
    if extra is None:
        extra = {}
    if isinstance(names, str):
        names = (names,)

    branches = self.branchmap()
    for name in names:
        self.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)
        if name in branches:
            self.ui.warn(_("warning: tag %s conflicts with existing"
                           " branch name\n") % name)

    def writetags(fp, names, munge, prevtags):
        # Append tag lines, first re-recording the old node so that
        # history-sensitive readers see the supersession.
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if (self._tagscache.tagtypes and
                name in self._tagscache.tagtypes):
                old = self.tags().get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = self.vfs('localtags', 'r+')
        except IOError:
            fp = self.vfs('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = self.wfile('.hgtags', 'rb+')
    except IOError as e:
        # py2.6+/py3-compatible form of the old "except IOError, e"
        if e.errno != errno.ENOENT:
            raise
        fp = self.wfile('.hgtags', 'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    self.invalidatecaches()

    if '.hgtags' not in self.dirstate:
        self[None].add(['.hgtags'])

    m = matchmod.exact(self.root, '', ['.hgtags'])
    tagnode = self.commit(message, user, date, extra=extra, match=m,
                          editor=editor)

    for name in names:
        self.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
586
589
def tag(self, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may
    be a string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new changeset
    is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # Refuse to auto-commit .hgtags when the working copy already
        # has pending changes to it.
        m = matchmod.exact(self.root, '', ['.hgtags'])
        dirty = self.status(match=m, unknown=True, ignored=True)
        if util.any(dirty):
            raise util.Abort(_('working copy of .hgtags is changed'),
                             hint=_('please commit .hgtags manually'))

    self.tags() # instantiate the cache
    self._tag(names, node, message, local, user, date, editor=editor)
616
619
@filteredpropertycache
def _tagscache(self):
    '''Returns a tagscache object that contains various tags related
    caches.'''

    # Centralize cache management in this one decorated function; the
    # other tag helpers just read attributes off the returned object.
    class tagscache(object):
        def __init__(self):
            # tags maps tag name -> node; tagtypes maps tag name ->
            # 'global' (from .hgtags across heads) or 'local' (from
            # .hg/localtags). Together they are the in-memory tag cache.
            self.tags = self.tagtypes = None
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()
    return cache
639
642
def tags(self):
    '''return a mapping of tag to node'''
    if self.changelog.filteredrevs:
        # The cache is built on the unfiltered repo; recompute when
        # revisions are filtered out of this view.
        alltags, _tagtypes = self._findtags()
    else:
        alltags = self._tagscache.tags
    visible = {}
    for name, node in alltags.iteritems():
        try:
            self.changelog.rev(node)  # drop tags to unknown nodes
        except (error.LookupError, ValueError):
            continue
        visible[name] = node
    return visible
655
658
def _findtags(self):
    '''Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like 'global' or 'local'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?

    alltags = {} # map tag name to (node, hist)
    tagtypes = {}

    tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Tag names must be re-encoded: the tags
    # module always works in UTF-8 (so the cache loses no info), while
    # the rest of Mercurial expects the local encoding.
    tags = {}
    for name, (node, hist) in alltags.iteritems():
        if node != nullid:
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict((encoding.tolocal(name), value)
                    for name, value in tagtypes.iteritems())
    return (tags, tagtypes)
688
691
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local' : a local tag
    'global' : a global tag
    None : tag does not exist
    '''
    return self._tagscache.tagtypes.get(tagname)
699
702
def tagslist(self):
    '''return a list of tags ordered by revision'''
    cache = self._tagscache
    if not cache.tagslist:
        entries = [(self.changelog.rev(node), name, node)
                   for name, node in self.tags().iteritems()]
        # sort by revision, then expose only (name, node)
        cache.tagslist = [(name, node)
                          for _rev, name, node in sorted(entries)]
    return cache.tagslist
709
712
def nodetags(self, node):
    '''return the tags associated with a node'''
    cache = self._tagscache
    if not cache.nodetagscache:
        # Invert the tag->node mapping once, lazily.
        bynode = {}
        for name, n in cache.tags.iteritems():
            bynode.setdefault(n, []).append(name)
        for names in bynode.itervalues():
            names.sort()
        cache.nodetagscache = bynode
    return cache.nodetagscache.get(node, [])
720
723
def nodebookmarks(self, node):
    """Return the sorted list of bookmark names pointing at *node*."""
    marks = [mark for mark, n in self._bookmarks.iteritems()
             if n == node]
    return sorted(marks)
727
730
def branchmap(self):
    '''returns a dictionary {branch: [branchheads]} with branchheads
    ordered by increasing revision number'''
    branchmap.updatecache(self)
    return self._branchcaches[self.filtername]

@unfilteredmethod
def revbranchcache(self):
    # Lazily create the rev->branch cache on the unfiltered repo.
    if not self._revbranchcache:
        self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
    return self._revbranchcache
739
742
def branchtip(self, branch, ignoremissing=False):
    '''return the tip node for a given branch

    If ignoremissing is True, this returns None for an unknown branch
    instead of raising; handy for callers such as namespaces that only
    expect None.
    '''
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        if ignoremissing:
            return None
        raise error.RepoLookupError(_("unknown branch '%s'") % branch)
755
758
def lookup(self, key):
    """Resolve *key* to a changeset node."""
    return self[key].node()

def lookupbranch(self, key, remote=None):
    """Return the branch name for *key*, preferring *remote* when given."""
    target = remote or self
    if key in target.branchmap():
        return key

    # Fall back to resolving the key as a changeset and asking for its
    # branch; only a local() remote can do that.
    if remote and remote.local():
        target = remote
    else:
        target = self
    return target[key].branch()
766
769
def known(self, nodes):
    """Return a list of booleans, one per node: True when the node is
    present here and is not in the secret phase."""
    nm = self.changelog.nodemap
    pc = self._phasecache
    result = []
    for node in nodes:
        rev = nm.get(node)
        result.append(rev is not None
                      and pc.phase(self, rev) < phases.secret)
    return result
776
779
def local(self):
    """This is a local repository; return self (peers return None)."""
    return self

def cancopy(self):
    """True when this repo may be cloned by copying its files."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    if not self.ui.configbool('phases', 'publish', True):
        return True
    # if publishing we can't copy if there is filtered content
    return not self.filtered('visible').changelog.filteredrevs

def shared(self):
    '''the type of shared repository (None if not shared)'''
    if self.sharedpath != self.path:
        return 'store'
    return None
794
797
def join(self, f, *insidef):
    """Join *f* (and optional path parts) under the .hg directory."""
    return self.vfs.join(os.path.join(f, *insidef))

def wjoin(self, f, *insidef):
    """Join *f* (and optional path parts) under the working directory."""
    return self.vfs.reljoin(self.root, f, *insidef)

def file(self, f):
    """Return the filelog for tracked file *f* (leading '/' stripped)."""
    if f[0] == '/':
        f = f[1:]
    return filelog.filelog(self.svfs, f)
805
808
def changectx(self, changeid):
    """Return the changectx for *changeid* (alias for self[changeid])."""
    return self[changeid]

def parents(self, changeid=None):
    '''get list of changectxs for parents of changeid'''
    return self[changeid].parents()
812
815
def setparents(self, p1, p2=nullid):
    """Set the working directory parents, fixing up copy records."""
    self.dirstate.beginparentchange()
    copies = self.dirstate.setparents(p1, p2)
    pctx = self[p1]
    if copies:
        # The dirstate cannot adjust copy records itself: that needs
        # the parents' manifests. Keep records only for entries added
        # to the first parent.
        for dst in copies:
            if dst not in pctx and copies[dst] in pctx:
                self.dirstate.copy(copies[dst], dst)
        if p2 == nullid:
            for dst, src in sorted(self.dirstate.copies().items()):
                if dst not in pctx and src not in pctx:
                    self.dirstate.copy(None, dst)
    self.dirstate.endparentchange()
829
832
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)

def getcwd(self):
    """Return the current working directory, via the dirstate."""
    return self.dirstate.getcwd()

def pathto(self, f, cwd=None):
    """Return *f* as a path relative to *cwd*, via the dirstate."""
    return self.dirstate.pathto(f, cwd)

def wfile(self, f, mode='r'):
    """Open *f* from the working directory."""
    return self.wvfs(f, mode)

def _link(self, f):
    """True when working-directory file *f* is a symlink."""
    return self.wvfs.islink(f)
846
849
def _loadfilter(self, filter):
    """Parse the config section *filter* ('encode'/'decode') into a
    cached list of (matcher, filterfn, params) triples."""
    if filter not in self.filterpats:
        entries = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            # A command prefixed by a registered data filter name uses
            # that in-process filter instead of an external command.
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            entries.append((mf, fn, params))
        self.filterpats[filter] = entries
    return self.filterpats[filter]
870
873
def _filter(self, filterpats, filename, data):
    """Pipe *data* through the first filter whose pattern matches
    *filename*; return it unchanged when none match."""
    for mf, fn, cmd in filterpats:
        if not mf(filename):
            continue
        self.ui.debug("filtering %s through %s\n" % (filename, cmd))
        data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
        break
    return data
879
882
880 @unfilteredpropertycache
883 @unfilteredpropertycache
881 def _encodefilterpats(self):
884 def _encodefilterpats(self):
882 return self._loadfilter('encode')
885 return self._loadfilter('encode')
883
886
884 @unfilteredpropertycache
887 @unfilteredpropertycache
885 def _decodefilterpats(self):
888 def _decodefilterpats(self):
886 return self._loadfilter('decode')
889 return self._loadfilter('decode')
887
890
888 def adddatafilter(self, name, filter):
891 def adddatafilter(self, name, filter):
889 self._datafilters[name] = filter
892 self._datafilters[name] = filter
890
893
def wread(self, filename):
    """Read *filename* from the working directory (the link target for
    symlinks) and run it through the encode filters."""
    if self._link(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
897
900
def wwrite(self, filename, data, flags):
    """Write *data* to working-directory file *filename* after decode
    filtering; 'l' in flags makes a symlink, 'x' sets the exec bit."""
    data = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(filename, data)
        if 'x' in flags:
            self.wvfs.setflags(filename, False, True)
906
909
def wwritedata(self, filename, data):
    """Return *data* as it would be written: after decode filtering."""
    return self._filter(self._decodefilterpats, filename, data)
909
912
def currenttransaction(self):
    """Return the running transaction, or None when none exists."""
    tr = self._transref() if self._transref else None
    if tr and tr.running():
        return tr
    return None
920
923
def transaction(self, desc, report=None):
    """Open (or nest into) a transaction named *desc*.

    Fires the pretxnopen hook, snapshots journal files, and wires the
    pretxnclose/txnclose hooks onto the transaction's lifecycle.

    Fix: the pretxnclose validator passed ``xnname=desc`` — a typo for
    ``txnname`` — so pretxnclose hooks never saw $HG_TXNNAME.
    """
    if (self.ui.configbool('devel', 'all')
            or self.ui.configbool('devel', 'check-locks')):
        l = self._lockref and self._lockref()
        if l is None or not l.held:
            msg = 'transaction with no lock\n'
            if self.ui.tracebackflag:
                util.debugstacktrace(msg, 1)
            else:
                self.ui.write_err(msg)
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    self.hook('pretxnopen', throw=True, txnname=desc)

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    def validate(tr):
        """will run pre-closing hooks"""
        pending = lambda: tr.writepending() and self.root or ""
        reporef().hook('pretxnclose', throw=True, pending=pending,
                       txnname=desc)

    tr = transaction.transaction(rp, self.sopener, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate)
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        def hook():
            reporef().hook('txnclose', throw=False, txnname=desc,
                           **tr2.hookargs)
        reporef()._afterlock(hook)
    tr.addfinalize('txnclose-hook', txnclosehook)
    self._transref = weakref.ref(tr)
    return tr
978
981
def _journalfiles(self):
    # (vfs, name) pair for every file a transaction journals.
    return ((self.svfs, 'journal'),
            (self.vfs, 'journal.dirstate'),
            (self.vfs, 'journal.branch'),
            (self.vfs, 'journal.desc'),
            (self.vfs, 'journal.bookmarks'),
            (self.svfs, 'journal.phaseroots'))

def undofiles(self):
    # The matching undo.* file for each journal file.
    return [(vfs, undoname(name)) for vfs, name in self._journalfiles()]
989
992
def _writejournal(self, desc):
    """Snapshot pre-transaction state into the journal.* files."""
    self.vfs.write("journal.dirstate", self.vfs.tryread("dirstate"))
    self.vfs.write("journal.branch",
                   encoding.fromlocal(self.dirstate.branch()))
    self.vfs.write("journal.desc", "%d\n%s\n" % (len(self), desc))
    self.vfs.write("journal.bookmarks", self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots",
                    self.svfs.tryread("phaseroots"))
1001
1004
def recover(self):
    """Roll back an interrupted transaction.

    Returns True when a journal was found and rolled back, else False.
    """
    lock = self.lock()
    try:
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        vfsmap = {'': self.svfs,
                  'plain': self.vfs,}
        transaction.rollback(self.svfs, vfsmap, "journal",
                             self.ui.warn)
        self.invalidate()
        return True
    finally:
        lock.release()
1018
1021
1019 def rollback(self, dryrun=False, force=False):
1022 def rollback(self, dryrun=False, force=False):
1020 wlock = lock = None
1023 wlock = lock = None
1021 try:
1024 try:
1022 wlock = self.wlock()
1025 wlock = self.wlock()
1023 lock = self.lock()
1026 lock = self.lock()
1024 if self.svfs.exists("undo"):
1027 if self.svfs.exists("undo"):
1025 return self._rollback(dryrun, force)
1028 return self._rollback(dryrun, force)
1026 else:
1029 else:
1027 self.ui.warn(_("no rollback information available\n"))
1030 self.ui.warn(_("no rollback information available\n"))
1028 return 1
1031 return 1
1029 finally:
1032 finally:
1030 release(lock, wlock)
1033 release(lock, wlock)
1031
1034
1032 @unfilteredmethod # Until we get smarter cache management
1035 @unfilteredmethod # Until we get smarter cache management
1033 def _rollback(self, dryrun, force):
1036 def _rollback(self, dryrun, force):
1034 ui = self.ui
1037 ui = self.ui
1035 try:
1038 try:
1036 args = self.vfs.read('undo.desc').splitlines()
1039 args = self.vfs.read('undo.desc').splitlines()
1037 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1040 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1038 if len(args) >= 3:
1041 if len(args) >= 3:
1039 detail = args[2]
1042 detail = args[2]
1040 oldtip = oldlen - 1
1043 oldtip = oldlen - 1
1041
1044
1042 if detail and ui.verbose:
1045 if detail and ui.verbose:
1043 msg = (_('repository tip rolled back to revision %s'
1046 msg = (_('repository tip rolled back to revision %s'
1044 ' (undo %s: %s)\n')
1047 ' (undo %s: %s)\n')
1045 % (oldtip, desc, detail))
1048 % (oldtip, desc, detail))
1046 else:
1049 else:
1047 msg = (_('repository tip rolled back to revision %s'
1050 msg = (_('repository tip rolled back to revision %s'
1048 ' (undo %s)\n')
1051 ' (undo %s)\n')
1049 % (oldtip, desc))
1052 % (oldtip, desc))
1050 except IOError:
1053 except IOError:
1051 msg = _('rolling back unknown transaction\n')
1054 msg = _('rolling back unknown transaction\n')
1052 desc = None
1055 desc = None
1053
1056
1054 if not force and self['.'] != self['tip'] and desc == 'commit':
1057 if not force and self['.'] != self['tip'] and desc == 'commit':
1055 raise util.Abort(
1058 raise util.Abort(
1056 _('rollback of last commit while not checked out '
1059 _('rollback of last commit while not checked out '
1057 'may lose data'), hint=_('use -f to force'))
1060 'may lose data'), hint=_('use -f to force'))
1058
1061
1059 ui.status(msg)
1062 ui.status(msg)
1060 if dryrun:
1063 if dryrun:
1061 return 0
1064 return 0
1062
1065
1063 parents = self.dirstate.parents()
1066 parents = self.dirstate.parents()
1064 self.destroying()
1067 self.destroying()
1065 vfsmap = {'plain': self.vfs, '': self.svfs}
1068 vfsmap = {'plain': self.vfs, '': self.svfs}
1066 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1069 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1067 if self.vfs.exists('undo.bookmarks'):
1070 if self.vfs.exists('undo.bookmarks'):
1068 self.vfs.rename('undo.bookmarks', 'bookmarks')
1071 self.vfs.rename('undo.bookmarks', 'bookmarks')
1069 if self.svfs.exists('undo.phaseroots'):
1072 if self.svfs.exists('undo.phaseroots'):
1070 self.svfs.rename('undo.phaseroots', 'phaseroots')
1073 self.svfs.rename('undo.phaseroots', 'phaseroots')
1071 self.invalidate()
1074 self.invalidate()
1072
1075
1073 parentgone = (parents[0] not in self.changelog.nodemap or
1076 parentgone = (parents[0] not in self.changelog.nodemap or
1074 parents[1] not in self.changelog.nodemap)
1077 parents[1] not in self.changelog.nodemap)
1075 if parentgone:
1078 if parentgone:
1076 self.vfs.rename('undo.dirstate', 'dirstate')
1079 self.vfs.rename('undo.dirstate', 'dirstate')
1077 try:
1080 try:
1078 branch = self.vfs.read('undo.branch')
1081 branch = self.vfs.read('undo.branch')
1079 self.dirstate.setbranch(encoding.tolocal(branch))
1082 self.dirstate.setbranch(encoding.tolocal(branch))
1080 except IOError:
1083 except IOError:
1081 ui.warn(_('named branch could not be reset: '
1084 ui.warn(_('named branch could not be reset: '
1082 'current branch is still \'%s\'\n')
1085 'current branch is still \'%s\'\n')
1083 % self.dirstate.branch())
1086 % self.dirstate.branch())
1084
1087
1085 self.dirstate.invalidate()
1088 self.dirstate.invalidate()
1086 parents = tuple([p.rev() for p in self.parents()])
1089 parents = tuple([p.rev() for p in self.parents()])
1087 if len(parents) > 1:
1090 if len(parents) > 1:
1088 ui.status(_('working directory now based on '
1091 ui.status(_('working directory now based on '
1089 'revisions %d and %d\n') % parents)
1092 'revisions %d and %d\n') % parents)
1090 else:
1093 else:
1091 ui.status(_('working directory now based on '
1094 ui.status(_('working directory now based on '
1092 'revision %d\n') % parents)
1095 'revision %d\n') % parents)
1093 # TODO: if we know which new heads may result from this rollback, pass
1096 # TODO: if we know which new heads may result from this rollback, pass
1094 # them to destroy(), which will prevent the branchhead cache from being
1097 # them to destroy(), which will prevent the branchhead cache from being
1095 # invalidated.
1098 # invalidated.
1096 self.destroyed()
1099 self.destroyed()
1097 return 0
1100 return 0
1098
1101
1099 def invalidatecaches(self):
1102 def invalidatecaches(self):
1100
1103
1101 if '_tagscache' in vars(self):
1104 if '_tagscache' in vars(self):
1102 # can't use delattr on proxy
1105 # can't use delattr on proxy
1103 del self.__dict__['_tagscache']
1106 del self.__dict__['_tagscache']
1104
1107
1105 self.unfiltered()._branchcaches.clear()
1108 self.unfiltered()._branchcaches.clear()
1106 self.invalidatevolatilesets()
1109 self.invalidatevolatilesets()
1107
1110
1108 def invalidatevolatilesets(self):
1111 def invalidatevolatilesets(self):
1109 self.filteredrevcache.clear()
1112 self.filteredrevcache.clear()
1110 obsolete.clearobscaches(self)
1113 obsolete.clearobscaches(self)
1111
1114
1112 def invalidatedirstate(self):
1115 def invalidatedirstate(self):
1113 '''Invalidates the dirstate, causing the next call to dirstate
1116 '''Invalidates the dirstate, causing the next call to dirstate
1114 to check if it was modified since the last time it was read,
1117 to check if it was modified since the last time it was read,
1115 rereading it if it has.
1118 rereading it if it has.
1116
1119
1117 This is different to dirstate.invalidate() that it doesn't always
1120 This is different to dirstate.invalidate() that it doesn't always
1118 rereads the dirstate. Use dirstate.invalidate() if you want to
1121 rereads the dirstate. Use dirstate.invalidate() if you want to
1119 explicitly read the dirstate again (i.e. restoring it to a previous
1122 explicitly read the dirstate again (i.e. restoring it to a previous
1120 known good state).'''
1123 known good state).'''
1121 if hasunfilteredcache(self, 'dirstate'):
1124 if hasunfilteredcache(self, 'dirstate'):
1122 for k in self.dirstate._filecache:
1125 for k in self.dirstate._filecache:
1123 try:
1126 try:
1124 delattr(self.dirstate, k)
1127 delattr(self.dirstate, k)
1125 except AttributeError:
1128 except AttributeError:
1126 pass
1129 pass
1127 delattr(self.unfiltered(), 'dirstate')
1130 delattr(self.unfiltered(), 'dirstate')
1128
1131
1129 def invalidate(self):
1132 def invalidate(self):
1130 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1133 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1131 for k in self._filecache:
1134 for k in self._filecache:
1132 # dirstate is invalidated separately in invalidatedirstate()
1135 # dirstate is invalidated separately in invalidatedirstate()
1133 if k == 'dirstate':
1136 if k == 'dirstate':
1134 continue
1137 continue
1135
1138
1136 try:
1139 try:
1137 delattr(unfiltered, k)
1140 delattr(unfiltered, k)
1138 except AttributeError:
1141 except AttributeError:
1139 pass
1142 pass
1140 self.invalidatecaches()
1143 self.invalidatecaches()
1141 self.store.invalidatecaches()
1144 self.store.invalidatecaches()
1142
1145
1143 def invalidateall(self):
1146 def invalidateall(self):
1144 '''Fully invalidates both store and non-store parts, causing the
1147 '''Fully invalidates both store and non-store parts, causing the
1145 subsequent operation to reread any outside changes.'''
1148 subsequent operation to reread any outside changes.'''
1146 # extension should hook this to invalidate its caches
1149 # extension should hook this to invalidate its caches
1147 self.invalidate()
1150 self.invalidate()
1148 self.invalidatedirstate()
1151 self.invalidatedirstate()
1149
1152
1150 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1153 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1151 try:
1154 try:
1152 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1155 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1153 except error.LockHeld, inst:
1156 except error.LockHeld, inst:
1154 if not wait:
1157 if not wait:
1155 raise
1158 raise
1156 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1159 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1157 (desc, inst.locker))
1160 (desc, inst.locker))
1158 # default to 600 seconds timeout
1161 # default to 600 seconds timeout
1159 l = lockmod.lock(vfs, lockname,
1162 l = lockmod.lock(vfs, lockname,
1160 int(self.ui.config("ui", "timeout", "600")),
1163 int(self.ui.config("ui", "timeout", "600")),
1161 releasefn, desc=desc)
1164 releasefn, desc=desc)
1162 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1165 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1163 if acquirefn:
1166 if acquirefn:
1164 acquirefn()
1167 acquirefn()
1165 return l
1168 return l
1166
1169
1167 def _afterlock(self, callback):
1170 def _afterlock(self, callback):
1168 """add a callback to the current repository lock.
1171 """add a callback to the current repository lock.
1169
1172
1170 The callback will be executed on lock release."""
1173 The callback will be executed on lock release."""
1171 l = self._lockref and self._lockref()
1174 l = self._lockref and self._lockref()
1172 if l:
1175 if l:
1173 l.postrelease.append(callback)
1176 l.postrelease.append(callback)
1174 else:
1177 else:
1175 callback()
1178 callback()
1176
1179
1177 def lock(self, wait=True):
1180 def lock(self, wait=True):
1178 '''Lock the repository store (.hg/store) and return a weak reference
1181 '''Lock the repository store (.hg/store) and return a weak reference
1179 to the lock. Use this before modifying the store (e.g. committing or
1182 to the lock. Use this before modifying the store (e.g. committing or
1180 stripping). If you are opening a transaction, get a lock as well.)'''
1183 stripping). If you are opening a transaction, get a lock as well.)'''
1181 l = self._lockref and self._lockref()
1184 l = self._lockref and self._lockref()
1182 if l is not None and l.held:
1185 if l is not None and l.held:
1183 l.lock()
1186 l.lock()
1184 return l
1187 return l
1185
1188
1186 def unlock():
1189 def unlock():
1187 for k, ce in self._filecache.items():
1190 for k, ce in self._filecache.items():
1188 if k == 'dirstate' or k not in self.__dict__:
1191 if k == 'dirstate' or k not in self.__dict__:
1189 continue
1192 continue
1190 ce.refresh()
1193 ce.refresh()
1191
1194
1192 l = self._lock(self.svfs, "lock", wait, unlock,
1195 l = self._lock(self.svfs, "lock", wait, unlock,
1193 self.invalidate, _('repository %s') % self.origroot)
1196 self.invalidate, _('repository %s') % self.origroot)
1194 self._lockref = weakref.ref(l)
1197 self._lockref = weakref.ref(l)
1195 return l
1198 return l
1196
1199
1197 def wlock(self, wait=True):
1200 def wlock(self, wait=True):
1198 '''Lock the non-store parts of the repository (everything under
1201 '''Lock the non-store parts of the repository (everything under
1199 .hg except .hg/store) and return a weak reference to the lock.
1202 .hg except .hg/store) and return a weak reference to the lock.
1200 Use this before modifying files in .hg.'''
1203 Use this before modifying files in .hg.'''
1201 if (self.ui.configbool('devel', 'all')
1204 if (self.ui.configbool('devel', 'all')
1202 or self.ui.configbool('devel', 'check-locks')):
1205 or self.ui.configbool('devel', 'check-locks')):
1203 l = self._lockref and self._lockref()
1206 l = self._lockref and self._lockref()
1204 if l is not None and l.held:
1207 if l is not None and l.held:
1205 msg = '"lock" taken before "wlock"\n'
1208 msg = '"lock" taken before "wlock"\n'
1206 if self.ui.tracebackflag:
1209 if self.ui.tracebackflag:
1207 util.debugstacktrace(msg, 1)
1210 util.debugstacktrace(msg, 1)
1208 else:
1211 else:
1209 self.ui.write_err(msg)
1212 self.ui.write_err(msg)
1210 l = self._wlockref and self._wlockref()
1213 l = self._wlockref and self._wlockref()
1211 if l is not None and l.held:
1214 if l is not None and l.held:
1212 l.lock()
1215 l.lock()
1213 return l
1216 return l
1214
1217
1215 def unlock():
1218 def unlock():
1216 if self.dirstate.pendingparentchange():
1219 if self.dirstate.pendingparentchange():
1217 self.dirstate.invalidate()
1220 self.dirstate.invalidate()
1218 else:
1221 else:
1219 self.dirstate.write()
1222 self.dirstate.write()
1220
1223
1221 self._filecache['dirstate'].refresh()
1224 self._filecache['dirstate'].refresh()
1222
1225
1223 l = self._lock(self.vfs, "wlock", wait, unlock,
1226 l = self._lock(self.vfs, "wlock", wait, unlock,
1224 self.invalidatedirstate, _('working directory of %s') %
1227 self.invalidatedirstate, _('working directory of %s') %
1225 self.origroot)
1228 self.origroot)
1226 self._wlockref = weakref.ref(l)
1229 self._wlockref = weakref.ref(l)
1227 return l
1230 return l
1228
1231
1229 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1232 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1230 """
1233 """
1231 commit an individual file as part of a larger transaction
1234 commit an individual file as part of a larger transaction
1232 """
1235 """
1233
1236
1234 fname = fctx.path()
1237 fname = fctx.path()
1235 fparent1 = manifest1.get(fname, nullid)
1238 fparent1 = manifest1.get(fname, nullid)
1236 fparent2 = manifest2.get(fname, nullid)
1239 fparent2 = manifest2.get(fname, nullid)
1237 if isinstance(fctx, context.filectx):
1240 if isinstance(fctx, context.filectx):
1238 node = fctx.filenode()
1241 node = fctx.filenode()
1239 if node in [fparent1, fparent2]:
1242 if node in [fparent1, fparent2]:
1240 self.ui.debug('reusing %s filelog entry\n' % fname)
1243 self.ui.debug('reusing %s filelog entry\n' % fname)
1241 return node
1244 return node
1242
1245
1243 flog = self.file(fname)
1246 flog = self.file(fname)
1244 meta = {}
1247 meta = {}
1245 copy = fctx.renamed()
1248 copy = fctx.renamed()
1246 if copy and copy[0] != fname:
1249 if copy and copy[0] != fname:
1247 # Mark the new revision of this file as a copy of another
1250 # Mark the new revision of this file as a copy of another
1248 # file. This copy data will effectively act as a parent
1251 # file. This copy data will effectively act as a parent
1249 # of this new revision. If this is a merge, the first
1252 # of this new revision. If this is a merge, the first
1250 # parent will be the nullid (meaning "look up the copy data")
1253 # parent will be the nullid (meaning "look up the copy data")
1251 # and the second one will be the other parent. For example:
1254 # and the second one will be the other parent. For example:
1252 #
1255 #
1253 # 0 --- 1 --- 3 rev1 changes file foo
1256 # 0 --- 1 --- 3 rev1 changes file foo
1254 # \ / rev2 renames foo to bar and changes it
1257 # \ / rev2 renames foo to bar and changes it
1255 # \- 2 -/ rev3 should have bar with all changes and
1258 # \- 2 -/ rev3 should have bar with all changes and
1256 # should record that bar descends from
1259 # should record that bar descends from
1257 # bar in rev2 and foo in rev1
1260 # bar in rev2 and foo in rev1
1258 #
1261 #
1259 # this allows this merge to succeed:
1262 # this allows this merge to succeed:
1260 #
1263 #
1261 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1264 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1262 # \ / merging rev3 and rev4 should use bar@rev2
1265 # \ / merging rev3 and rev4 should use bar@rev2
1263 # \- 2 --- 4 as the merge base
1266 # \- 2 --- 4 as the merge base
1264 #
1267 #
1265
1268
1266 cfname = copy[0]
1269 cfname = copy[0]
1267 crev = manifest1.get(cfname)
1270 crev = manifest1.get(cfname)
1268 newfparent = fparent2
1271 newfparent = fparent2
1269
1272
1270 if manifest2: # branch merge
1273 if manifest2: # branch merge
1271 if fparent2 == nullid or crev is None: # copied on remote side
1274 if fparent2 == nullid or crev is None: # copied on remote side
1272 if cfname in manifest2:
1275 if cfname in manifest2:
1273 crev = manifest2[cfname]
1276 crev = manifest2[cfname]
1274 newfparent = fparent1
1277 newfparent = fparent1
1275
1278
1276 # Here, we used to search backwards through history to try to find
1279 # Here, we used to search backwards through history to try to find
1277 # where the file copy came from if the source of a copy was not in
1280 # where the file copy came from if the source of a copy was not in
1278 # the parent directory. However, this doesn't actually make sense to
1281 # the parent directory. However, this doesn't actually make sense to
1279 # do (what does a copy from something not in your working copy even
1282 # do (what does a copy from something not in your working copy even
1280 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1283 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1281 # the user that copy information was dropped, so if they didn't
1284 # the user that copy information was dropped, so if they didn't
1282 # expect this outcome it can be fixed, but this is the correct
1285 # expect this outcome it can be fixed, but this is the correct
1283 # behavior in this circumstance.
1286 # behavior in this circumstance.
1284
1287
1285 if crev:
1288 if crev:
1286 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1289 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1287 meta["copy"] = cfname
1290 meta["copy"] = cfname
1288 meta["copyrev"] = hex(crev)
1291 meta["copyrev"] = hex(crev)
1289 fparent1, fparent2 = nullid, newfparent
1292 fparent1, fparent2 = nullid, newfparent
1290 else:
1293 else:
1291 self.ui.warn(_("warning: can't find ancestor for '%s' "
1294 self.ui.warn(_("warning: can't find ancestor for '%s' "
1292 "copied from '%s'!\n") % (fname, cfname))
1295 "copied from '%s'!\n") % (fname, cfname))
1293
1296
1294 elif fparent1 == nullid:
1297 elif fparent1 == nullid:
1295 fparent1, fparent2 = fparent2, nullid
1298 fparent1, fparent2 = fparent2, nullid
1296 elif fparent2 != nullid:
1299 elif fparent2 != nullid:
1297 # is one parent an ancestor of the other?
1300 # is one parent an ancestor of the other?
1298 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1301 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1299 if fparent1 in fparentancestors:
1302 if fparent1 in fparentancestors:
1300 fparent1, fparent2 = fparent2, nullid
1303 fparent1, fparent2 = fparent2, nullid
1301 elif fparent2 in fparentancestors:
1304 elif fparent2 in fparentancestors:
1302 fparent2 = nullid
1305 fparent2 = nullid
1303
1306
1304 # is the file changed?
1307 # is the file changed?
1305 text = fctx.data()
1308 text = fctx.data()
1306 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1309 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1307 changelist.append(fname)
1310 changelist.append(fname)
1308 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1311 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1309 # are just the flags changed during merge?
1312 # are just the flags changed during merge?
1310 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1313 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1311 changelist.append(fname)
1314 changelist.append(fname)
1312
1315
1313 return fparent1
1316 return fparent1
1314
1317
1315 @unfilteredmethod
1318 @unfilteredmethod
1316 def commit(self, text="", user=None, date=None, match=None, force=False,
1319 def commit(self, text="", user=None, date=None, match=None, force=False,
1317 editor=False, extra={}):
1320 editor=False, extra={}):
1318 """Add a new revision to current repository.
1321 """Add a new revision to current repository.
1319
1322
1320 Revision information is gathered from the working directory,
1323 Revision information is gathered from the working directory,
1321 match can be used to filter the committed files. If editor is
1324 match can be used to filter the committed files. If editor is
1322 supplied, it is called to get a commit message.
1325 supplied, it is called to get a commit message.
1323 """
1326 """
1324
1327
1325 def fail(f, msg):
1328 def fail(f, msg):
1326 raise util.Abort('%s: %s' % (f, msg))
1329 raise util.Abort('%s: %s' % (f, msg))
1327
1330
1328 if not match:
1331 if not match:
1329 match = matchmod.always(self.root, '')
1332 match = matchmod.always(self.root, '')
1330
1333
1331 if not force:
1334 if not force:
1332 vdirs = []
1335 vdirs = []
1333 match.explicitdir = vdirs.append
1336 match.explicitdir = vdirs.append
1334 match.bad = fail
1337 match.bad = fail
1335
1338
1336 wlock = self.wlock()
1339 wlock = self.wlock()
1337 try:
1340 try:
1338 wctx = self[None]
1341 wctx = self[None]
1339 merge = len(wctx.parents()) > 1
1342 merge = len(wctx.parents()) > 1
1340
1343
1341 if not force and merge and not match.always():
1344 if not force and merge and not match.always():
1342 raise util.Abort(_('cannot partially commit a merge '
1345 raise util.Abort(_('cannot partially commit a merge '
1343 '(do not specify files or patterns)'))
1346 '(do not specify files or patterns)'))
1344
1347
1345 status = self.status(match=match, clean=force)
1348 status = self.status(match=match, clean=force)
1346 if force:
1349 if force:
1347 status.modified.extend(status.clean) # mq may commit clean files
1350 status.modified.extend(status.clean) # mq may commit clean files
1348
1351
1349 # check subrepos
1352 # check subrepos
1350 subs = []
1353 subs = []
1351 commitsubs = set()
1354 commitsubs = set()
1352 newstate = wctx.substate.copy()
1355 newstate = wctx.substate.copy()
1353 # only manage subrepos and .hgsubstate if .hgsub is present
1356 # only manage subrepos and .hgsubstate if .hgsub is present
1354 if '.hgsub' in wctx:
1357 if '.hgsub' in wctx:
1355 # we'll decide whether to track this ourselves, thanks
1358 # we'll decide whether to track this ourselves, thanks
1356 for c in status.modified, status.added, status.removed:
1359 for c in status.modified, status.added, status.removed:
1357 if '.hgsubstate' in c:
1360 if '.hgsubstate' in c:
1358 c.remove('.hgsubstate')
1361 c.remove('.hgsubstate')
1359
1362
1360 # compare current state to last committed state
1363 # compare current state to last committed state
1361 # build new substate based on last committed state
1364 # build new substate based on last committed state
1362 oldstate = wctx.p1().substate
1365 oldstate = wctx.p1().substate
1363 for s in sorted(newstate.keys()):
1366 for s in sorted(newstate.keys()):
1364 if not match(s):
1367 if not match(s):
1365 # ignore working copy, use old state if present
1368 # ignore working copy, use old state if present
1366 if s in oldstate:
1369 if s in oldstate:
1367 newstate[s] = oldstate[s]
1370 newstate[s] = oldstate[s]
1368 continue
1371 continue
1369 if not force:
1372 if not force:
1370 raise util.Abort(
1373 raise util.Abort(
1371 _("commit with new subrepo %s excluded") % s)
1374 _("commit with new subrepo %s excluded") % s)
1372 if wctx.sub(s).dirty(True):
1375 if wctx.sub(s).dirty(True):
1373 if not self.ui.configbool('ui', 'commitsubrepos'):
1376 if not self.ui.configbool('ui', 'commitsubrepos'):
1374 raise util.Abort(
1377 raise util.Abort(
1375 _("uncommitted changes in subrepo %s") % s,
1378 _("uncommitted changes in subrepo %s") % s,
1376 hint=_("use --subrepos for recursive commit"))
1379 hint=_("use --subrepos for recursive commit"))
1377 subs.append(s)
1380 subs.append(s)
1378 commitsubs.add(s)
1381 commitsubs.add(s)
1379 else:
1382 else:
1380 bs = wctx.sub(s).basestate()
1383 bs = wctx.sub(s).basestate()
1381 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1384 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1382 if oldstate.get(s, (None, None, None))[1] != bs:
1385 if oldstate.get(s, (None, None, None))[1] != bs:
1383 subs.append(s)
1386 subs.append(s)
1384
1387
1385 # check for removed subrepos
1388 # check for removed subrepos
1386 for p in wctx.parents():
1389 for p in wctx.parents():
1387 r = [s for s in p.substate if s not in newstate]
1390 r = [s for s in p.substate if s not in newstate]
1388 subs += [s for s in r if match(s)]
1391 subs += [s for s in r if match(s)]
1389 if subs:
1392 if subs:
1390 if (not match('.hgsub') and
1393 if (not match('.hgsub') and
1391 '.hgsub' in (wctx.modified() + wctx.added())):
1394 '.hgsub' in (wctx.modified() + wctx.added())):
1392 raise util.Abort(
1395 raise util.Abort(
1393 _("can't commit subrepos without .hgsub"))
1396 _("can't commit subrepos without .hgsub"))
1394 status.modified.insert(0, '.hgsubstate')
1397 status.modified.insert(0, '.hgsubstate')
1395
1398
1396 elif '.hgsub' in status.removed:
1399 elif '.hgsub' in status.removed:
1397 # clean up .hgsubstate when .hgsub is removed
1400 # clean up .hgsubstate when .hgsub is removed
1398 if ('.hgsubstate' in wctx and
1401 if ('.hgsubstate' in wctx and
1399 '.hgsubstate' not in (status.modified + status.added +
1402 '.hgsubstate' not in (status.modified + status.added +
1400 status.removed)):
1403 status.removed)):
1401 status.removed.insert(0, '.hgsubstate')
1404 status.removed.insert(0, '.hgsubstate')
1402
1405
1403 # make sure all explicit patterns are matched
1406 # make sure all explicit patterns are matched
1404 if not force and match.files():
1407 if not force and match.files():
1405 matched = set(status.modified + status.added + status.removed)
1408 matched = set(status.modified + status.added + status.removed)
1406
1409
1407 for f in match.files():
1410 for f in match.files():
1408 f = self.dirstate.normalize(f)
1411 f = self.dirstate.normalize(f)
1409 if f == '.' or f in matched or f in wctx.substate:
1412 if f == '.' or f in matched or f in wctx.substate:
1410 continue
1413 continue
1411 if f in status.deleted:
1414 if f in status.deleted:
1412 fail(f, _('file not found!'))
1415 fail(f, _('file not found!'))
1413 if f in vdirs: # visited directory
1416 if f in vdirs: # visited directory
1414 d = f + '/'
1417 d = f + '/'
1415 for mf in matched:
1418 for mf in matched:
1416 if mf.startswith(d):
1419 if mf.startswith(d):
1417 break
1420 break
1418 else:
1421 else:
1419 fail(f, _("no match under directory!"))
1422 fail(f, _("no match under directory!"))
1420 elif f not in self.dirstate:
1423 elif f not in self.dirstate:
1421 fail(f, _("file not tracked!"))
1424 fail(f, _("file not tracked!"))
1422
1425
1423 cctx = context.workingcommitctx(self, status,
1426 cctx = context.workingcommitctx(self, status,
1424 text, user, date, extra)
1427 text, user, date, extra)
1425
1428
1426 if (not force and not extra.get("close") and not merge
1429 if (not force and not extra.get("close") and not merge
1427 and not cctx.files()
1430 and not cctx.files()
1428 and wctx.branch() == wctx.p1().branch()):
1431 and wctx.branch() == wctx.p1().branch()):
1429 return None
1432 return None
1430
1433
1431 if merge and cctx.deleted():
1434 if merge and cctx.deleted():
1432 raise util.Abort(_("cannot commit merge with missing files"))
1435 raise util.Abort(_("cannot commit merge with missing files"))
1433
1436
1434 ms = mergemod.mergestate(self)
1437 ms = mergemod.mergestate(self)
1435 for f in status.modified:
1438 for f in status.modified:
1436 if f in ms and ms[f] == 'u':
1439 if f in ms and ms[f] == 'u':
1437 raise util.Abort(_('unresolved merge conflicts '
1440 raise util.Abort(_('unresolved merge conflicts '
1438 '(see "hg help resolve")'))
1441 '(see "hg help resolve")'))
1439
1442
1440 if editor:
1443 if editor:
1441 cctx._text = editor(self, cctx, subs)
1444 cctx._text = editor(self, cctx, subs)
1442 edited = (text != cctx._text)
1445 edited = (text != cctx._text)
1443
1446
1444 # Save commit message in case this transaction gets rolled back
1447 # Save commit message in case this transaction gets rolled back
1445 # (e.g. by a pretxncommit hook). Leave the content alone on
1448 # (e.g. by a pretxncommit hook). Leave the content alone on
1446 # the assumption that the user will use the same editor again.
1449 # the assumption that the user will use the same editor again.
1447 msgfn = self.savecommitmessage(cctx._text)
1450 msgfn = self.savecommitmessage(cctx._text)
1448
1451
1449 # commit subs and write new state
1452 # commit subs and write new state
1450 if subs:
1453 if subs:
1451 for s in sorted(commitsubs):
1454 for s in sorted(commitsubs):
1452 sub = wctx.sub(s)
1455 sub = wctx.sub(s)
1453 self.ui.status(_('committing subrepository %s\n') %
1456 self.ui.status(_('committing subrepository %s\n') %
1454 subrepo.subrelpath(sub))
1457 subrepo.subrelpath(sub))
1455 sr = sub.commit(cctx._text, user, date)
1458 sr = sub.commit(cctx._text, user, date)
1456 newstate[s] = (newstate[s][0], sr)
1459 newstate[s] = (newstate[s][0], sr)
1457 subrepo.writestate(self, newstate)
1460 subrepo.writestate(self, newstate)
1458
1461
1459 p1, p2 = self.dirstate.parents()
1462 p1, p2 = self.dirstate.parents()
1460 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1463 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1461 try:
1464 try:
1462 self.hook("precommit", throw=True, parent1=hookp1,
1465 self.hook("precommit", throw=True, parent1=hookp1,
1463 parent2=hookp2)
1466 parent2=hookp2)
1464 ret = self.commitctx(cctx, True)
1467 ret = self.commitctx(cctx, True)
1465 except: # re-raises
1468 except: # re-raises
1466 if edited:
1469 if edited:
1467 self.ui.write(
1470 self.ui.write(
1468 _('note: commit message saved in %s\n') % msgfn)
1471 _('note: commit message saved in %s\n') % msgfn)
1469 raise
1472 raise
1470
1473
1471 # update bookmarks, dirstate and mergestate
1474 # update bookmarks, dirstate and mergestate
1472 bookmarks.update(self, [p1, p2], ret)
1475 bookmarks.update(self, [p1, p2], ret)
1473 cctx.markcommitted(ret)
1476 cctx.markcommitted(ret)
1474 ms.reset()
1477 ms.reset()
1475 finally:
1478 finally:
1476 wlock.release()
1479 wlock.release()
1477
1480
1478 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1481 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1479 # hack for command that use a temporary commit (eg: histedit)
1482 # hack for command that use a temporary commit (eg: histedit)
1480 # temporary commit got stripped before hook release
1483 # temporary commit got stripped before hook release
1481 if node in self:
1484 if node in self:
1482 self.hook("commit", node=node, parent1=parent1,
1485 self.hook("commit", node=node, parent1=parent1,
1483 parent2=parent2)
1486 parent2=parent2)
1484 self._afterlock(commithook)
1487 self._afterlock(commithook)
1485 return ret
1488 return ret
1486
1489
1487 @unfilteredmethod
1490 @unfilteredmethod
1488 def commitctx(self, ctx, error=False):
1491 def commitctx(self, ctx, error=False):
1489 """Add a new revision to current repository.
1492 """Add a new revision to current repository.
1490 Revision information is passed via the context argument.
1493 Revision information is passed via the context argument.
1491 """
1494 """
1492
1495
1493 tr = None
1496 tr = None
1494 p1, p2 = ctx.p1(), ctx.p2()
1497 p1, p2 = ctx.p1(), ctx.p2()
1495 user = ctx.user()
1498 user = ctx.user()
1496
1499
1497 lock = self.lock()
1500 lock = self.lock()
1498 try:
1501 try:
1499 tr = self.transaction("commit")
1502 tr = self.transaction("commit")
1500 trp = weakref.proxy(tr)
1503 trp = weakref.proxy(tr)
1501
1504
1502 if ctx.files():
1505 if ctx.files():
1503 m1 = p1.manifest()
1506 m1 = p1.manifest()
1504 m2 = p2.manifest()
1507 m2 = p2.manifest()
1505 m = m1.copy()
1508 m = m1.copy()
1506
1509
1507 # check in files
1510 # check in files
1508 added = []
1511 added = []
1509 changed = []
1512 changed = []
1510 removed = list(ctx.removed())
1513 removed = list(ctx.removed())
1511 linkrev = len(self)
1514 linkrev = len(self)
1512 self.ui.note(_("committing files:\n"))
1515 self.ui.note(_("committing files:\n"))
1513 for f in sorted(ctx.modified() + ctx.added()):
1516 for f in sorted(ctx.modified() + ctx.added()):
1514 self.ui.note(f + "\n")
1517 self.ui.note(f + "\n")
1515 try:
1518 try:
1516 fctx = ctx[f]
1519 fctx = ctx[f]
1517 if fctx is None:
1520 if fctx is None:
1518 removed.append(f)
1521 removed.append(f)
1519 else:
1522 else:
1520 added.append(f)
1523 added.append(f)
1521 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1524 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1522 trp, changed)
1525 trp, changed)
1523 m.setflag(f, fctx.flags())
1526 m.setflag(f, fctx.flags())
1524 except OSError, inst:
1527 except OSError, inst:
1525 self.ui.warn(_("trouble committing %s!\n") % f)
1528 self.ui.warn(_("trouble committing %s!\n") % f)
1526 raise
1529 raise
1527 except IOError, inst:
1530 except IOError, inst:
1528 errcode = getattr(inst, 'errno', errno.ENOENT)
1531 errcode = getattr(inst, 'errno', errno.ENOENT)
1529 if error or errcode and errcode != errno.ENOENT:
1532 if error or errcode and errcode != errno.ENOENT:
1530 self.ui.warn(_("trouble committing %s!\n") % f)
1533 self.ui.warn(_("trouble committing %s!\n") % f)
1531 raise
1534 raise
1532
1535
1533 # update manifest
1536 # update manifest
1534 self.ui.note(_("committing manifest\n"))
1537 self.ui.note(_("committing manifest\n"))
1535 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1538 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1536 drop = [f for f in removed if f in m]
1539 drop = [f for f in removed if f in m]
1537 for f in drop:
1540 for f in drop:
1538 del m[f]
1541 del m[f]
1539 mn = self.manifest.add(m, trp, linkrev,
1542 mn = self.manifest.add(m, trp, linkrev,
1540 p1.manifestnode(), p2.manifestnode(),
1543 p1.manifestnode(), p2.manifestnode(),
1541 added, drop)
1544 added, drop)
1542 files = changed + removed
1545 files = changed + removed
1543 else:
1546 else:
1544 mn = p1.manifestnode()
1547 mn = p1.manifestnode()
1545 files = []
1548 files = []
1546
1549
1547 # update changelog
1550 # update changelog
1548 self.ui.note(_("committing changelog\n"))
1551 self.ui.note(_("committing changelog\n"))
1549 self.changelog.delayupdate(tr)
1552 self.changelog.delayupdate(tr)
1550 n = self.changelog.add(mn, files, ctx.description(),
1553 n = self.changelog.add(mn, files, ctx.description(),
1551 trp, p1.node(), p2.node(),
1554 trp, p1.node(), p2.node(),
1552 user, ctx.date(), ctx.extra().copy())
1555 user, ctx.date(), ctx.extra().copy())
1553 p = lambda: tr.writepending() and self.root or ""
1556 p = lambda: tr.writepending() and self.root or ""
1554 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1557 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1555 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1558 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1556 parent2=xp2, pending=p)
1559 parent2=xp2, pending=p)
1557 # set the new commit is proper phase
1560 # set the new commit is proper phase
1558 targetphase = subrepo.newcommitphase(self.ui, ctx)
1561 targetphase = subrepo.newcommitphase(self.ui, ctx)
1559 if targetphase:
1562 if targetphase:
1560 # retract boundary do not alter parent changeset.
1563 # retract boundary do not alter parent changeset.
1561 # if a parent have higher the resulting phase will
1564 # if a parent have higher the resulting phase will
1562 # be compliant anyway
1565 # be compliant anyway
1563 #
1566 #
1564 # if minimal phase was 0 we don't need to retract anything
1567 # if minimal phase was 0 we don't need to retract anything
1565 phases.retractboundary(self, tr, targetphase, [n])
1568 phases.retractboundary(self, tr, targetphase, [n])
1566 tr.close()
1569 tr.close()
1567 branchmap.updatecache(self.filtered('served'))
1570 branchmap.updatecache(self.filtered('served'))
1568 return n
1571 return n
1569 finally:
1572 finally:
1570 if tr:
1573 if tr:
1571 tr.release()
1574 tr.release()
1572 lock.release()
1575 lock.release()
1573
1576
1574 @unfilteredmethod
1577 @unfilteredmethod
1575 def destroying(self):
1578 def destroying(self):
1576 '''Inform the repository that nodes are about to be destroyed.
1579 '''Inform the repository that nodes are about to be destroyed.
1577 Intended for use by strip and rollback, so there's a common
1580 Intended for use by strip and rollback, so there's a common
1578 place for anything that has to be done before destroying history.
1581 place for anything that has to be done before destroying history.
1579
1582
1580 This is mostly useful for saving state that is in memory and waiting
1583 This is mostly useful for saving state that is in memory and waiting
1581 to be flushed when the current lock is released. Because a call to
1584 to be flushed when the current lock is released. Because a call to
1582 destroyed is imminent, the repo will be invalidated causing those
1585 destroyed is imminent, the repo will be invalidated causing those
1583 changes to stay in memory (waiting for the next unlock), or vanish
1586 changes to stay in memory (waiting for the next unlock), or vanish
1584 completely.
1587 completely.
1585 '''
1588 '''
1586 # When using the same lock to commit and strip, the phasecache is left
1589 # When using the same lock to commit and strip, the phasecache is left
1587 # dirty after committing. Then when we strip, the repo is invalidated,
1590 # dirty after committing. Then when we strip, the repo is invalidated,
1588 # causing those changes to disappear.
1591 # causing those changes to disappear.
1589 if '_phasecache' in vars(self):
1592 if '_phasecache' in vars(self):
1590 self._phasecache.write()
1593 self._phasecache.write()
1591
1594
1592 @unfilteredmethod
1595 @unfilteredmethod
1593 def destroyed(self):
1596 def destroyed(self):
1594 '''Inform the repository that nodes have been destroyed.
1597 '''Inform the repository that nodes have been destroyed.
1595 Intended for use by strip and rollback, so there's a common
1598 Intended for use by strip and rollback, so there's a common
1596 place for anything that has to be done after destroying history.
1599 place for anything that has to be done after destroying history.
1597 '''
1600 '''
1598 # When one tries to:
1601 # When one tries to:
1599 # 1) destroy nodes thus calling this method (e.g. strip)
1602 # 1) destroy nodes thus calling this method (e.g. strip)
1600 # 2) use phasecache somewhere (e.g. commit)
1603 # 2) use phasecache somewhere (e.g. commit)
1601 #
1604 #
1602 # then 2) will fail because the phasecache contains nodes that were
1605 # then 2) will fail because the phasecache contains nodes that were
1603 # removed. We can either remove phasecache from the filecache,
1606 # removed. We can either remove phasecache from the filecache,
1604 # causing it to reload next time it is accessed, or simply filter
1607 # causing it to reload next time it is accessed, or simply filter
1605 # the removed nodes now and write the updated cache.
1608 # the removed nodes now and write the updated cache.
1606 self._phasecache.filterunknown(self)
1609 self._phasecache.filterunknown(self)
1607 self._phasecache.write()
1610 self._phasecache.write()
1608
1611
1609 # update the 'served' branch cache to help read only server process
1612 # update the 'served' branch cache to help read only server process
1610 # Thanks to branchcache collaboration this is done from the nearest
1613 # Thanks to branchcache collaboration this is done from the nearest
1611 # filtered subset and it is expected to be fast.
1614 # filtered subset and it is expected to be fast.
1612 branchmap.updatecache(self.filtered('served'))
1615 branchmap.updatecache(self.filtered('served'))
1613
1616
1614 # Ensure the persistent tag cache is updated. Doing it now
1617 # Ensure the persistent tag cache is updated. Doing it now
1615 # means that the tag cache only has to worry about destroyed
1618 # means that the tag cache only has to worry about destroyed
1616 # heads immediately after a strip/rollback. That in turn
1619 # heads immediately after a strip/rollback. That in turn
1617 # guarantees that "cachetip == currenttip" (comparing both rev
1620 # guarantees that "cachetip == currenttip" (comparing both rev
1618 # and node) always means no nodes have been added or destroyed.
1621 # and node) always means no nodes have been added or destroyed.
1619
1622
1620 # XXX this is suboptimal when qrefresh'ing: we strip the current
1623 # XXX this is suboptimal when qrefresh'ing: we strip the current
1621 # head, refresh the tag cache, then immediately add a new head.
1624 # head, refresh the tag cache, then immediately add a new head.
1622 # But I think doing it this way is necessary for the "instant
1625 # But I think doing it this way is necessary for the "instant
1623 # tag cache retrieval" case to work.
1626 # tag cache retrieval" case to work.
1624 self.invalidate()
1627 self.invalidate()
1625
1628
1626 def walk(self, match, node=None):
1629 def walk(self, match, node=None):
1627 '''
1630 '''
1628 walk recursively through the directory tree or a given
1631 walk recursively through the directory tree or a given
1629 changeset, finding all files matched by the match
1632 changeset, finding all files matched by the match
1630 function
1633 function
1631 '''
1634 '''
1632 return self[node].walk(match)
1635 return self[node].walk(match)
1633
1636
1634 def status(self, node1='.', node2=None, match=None,
1637 def status(self, node1='.', node2=None, match=None,
1635 ignored=False, clean=False, unknown=False,
1638 ignored=False, clean=False, unknown=False,
1636 listsubrepos=False):
1639 listsubrepos=False):
1637 '''a convenience method that calls node1.status(node2)'''
1640 '''a convenience method that calls node1.status(node2)'''
1638 return self[node1].status(node2, match, ignored, clean, unknown,
1641 return self[node1].status(node2, match, ignored, clean, unknown,
1639 listsubrepos)
1642 listsubrepos)
1640
1643
1641 def heads(self, start=None):
1644 def heads(self, start=None):
1642 heads = self.changelog.heads(start)
1645 heads = self.changelog.heads(start)
1643 # sort the output in rev descending order
1646 # sort the output in rev descending order
1644 return sorted(heads, key=self.changelog.rev, reverse=True)
1647 return sorted(heads, key=self.changelog.rev, reverse=True)
1645
1648
1646 def branchheads(self, branch=None, start=None, closed=False):
1649 def branchheads(self, branch=None, start=None, closed=False):
1647 '''return a (possibly filtered) list of heads for the given branch
1650 '''return a (possibly filtered) list of heads for the given branch
1648
1651
1649 Heads are returned in topological order, from newest to oldest.
1652 Heads are returned in topological order, from newest to oldest.
1650 If branch is None, use the dirstate branch.
1653 If branch is None, use the dirstate branch.
1651 If start is not None, return only heads reachable from start.
1654 If start is not None, return only heads reachable from start.
1652 If closed is True, return heads that are marked as closed as well.
1655 If closed is True, return heads that are marked as closed as well.
1653 '''
1656 '''
1654 if branch is None:
1657 if branch is None:
1655 branch = self[None].branch()
1658 branch = self[None].branch()
1656 branches = self.branchmap()
1659 branches = self.branchmap()
1657 if branch not in branches:
1660 if branch not in branches:
1658 return []
1661 return []
1659 # the cache returns heads ordered lowest to highest
1662 # the cache returns heads ordered lowest to highest
1660 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1663 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1661 if start is not None:
1664 if start is not None:
1662 # filter out the heads that cannot be reached from startrev
1665 # filter out the heads that cannot be reached from startrev
1663 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1666 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1664 bheads = [h for h in bheads if h in fbheads]
1667 bheads = [h for h in bheads if h in fbheads]
1665 return bheads
1668 return bheads
1666
1669
1667 def branches(self, nodes):
1670 def branches(self, nodes):
1668 if not nodes:
1671 if not nodes:
1669 nodes = [self.changelog.tip()]
1672 nodes = [self.changelog.tip()]
1670 b = []
1673 b = []
1671 for n in nodes:
1674 for n in nodes:
1672 t = n
1675 t = n
1673 while True:
1676 while True:
1674 p = self.changelog.parents(n)
1677 p = self.changelog.parents(n)
1675 if p[1] != nullid or p[0] == nullid:
1678 if p[1] != nullid or p[0] == nullid:
1676 b.append((t, n, p[0], p[1]))
1679 b.append((t, n, p[0], p[1]))
1677 break
1680 break
1678 n = p[0]
1681 n = p[0]
1679 return b
1682 return b
1680
1683
1681 def between(self, pairs):
1684 def between(self, pairs):
1682 r = []
1685 r = []
1683
1686
1684 for top, bottom in pairs:
1687 for top, bottom in pairs:
1685 n, l, i = top, [], 0
1688 n, l, i = top, [], 0
1686 f = 1
1689 f = 1
1687
1690
1688 while n != bottom and n != nullid:
1691 while n != bottom and n != nullid:
1689 p = self.changelog.parents(n)[0]
1692 p = self.changelog.parents(n)[0]
1690 if i == f:
1693 if i == f:
1691 l.append(n)
1694 l.append(n)
1692 f = f * 2
1695 f = f * 2
1693 n = p
1696 n = p
1694 i += 1
1697 i += 1
1695
1698
1696 r.append(l)
1699 r.append(l)
1697
1700
1698 return r
1701 return r
1699
1702
1700 def checkpush(self, pushop):
1703 def checkpush(self, pushop):
1701 """Extensions can override this function if additional checks have
1704 """Extensions can override this function if additional checks have
1702 to be performed before pushing, or call it if they override push
1705 to be performed before pushing, or call it if they override push
1703 command.
1706 command.
1704 """
1707 """
1705 pass
1708 pass
1706
1709
1707 @unfilteredpropertycache
1710 @unfilteredpropertycache
1708 def prepushoutgoinghooks(self):
1711 def prepushoutgoinghooks(self):
1709 """Return util.hooks consists of "(repo, remote, outgoing)"
1712 """Return util.hooks consists of "(repo, remote, outgoing)"
1710 functions, which are called before pushing changesets.
1713 functions, which are called before pushing changesets.
1711 """
1714 """
1712 return util.hooks()
1715 return util.hooks()
1713
1716
1714 def stream_in(self, remote, requirements):
1717 def stream_in(self, remote, requirements):
1715 lock = self.lock()
1718 lock = self.lock()
1716 try:
1719 try:
1717 # Save remote branchmap. We will use it later
1720 # Save remote branchmap. We will use it later
1718 # to speed up branchcache creation
1721 # to speed up branchcache creation
1719 rbranchmap = None
1722 rbranchmap = None
1720 if remote.capable("branchmap"):
1723 if remote.capable("branchmap"):
1721 rbranchmap = remote.branchmap()
1724 rbranchmap = remote.branchmap()
1722
1725
1723 fp = remote.stream_out()
1726 fp = remote.stream_out()
1724 l = fp.readline()
1727 l = fp.readline()
1725 try:
1728 try:
1726 resp = int(l)
1729 resp = int(l)
1727 except ValueError:
1730 except ValueError:
1728 raise error.ResponseError(
1731 raise error.ResponseError(
1729 _('unexpected response from remote server:'), l)
1732 _('unexpected response from remote server:'), l)
1730 if resp == 1:
1733 if resp == 1:
1731 raise util.Abort(_('operation forbidden by server'))
1734 raise util.Abort(_('operation forbidden by server'))
1732 elif resp == 2:
1735 elif resp == 2:
1733 raise util.Abort(_('locking the remote repository failed'))
1736 raise util.Abort(_('locking the remote repository failed'))
1734 elif resp != 0:
1737 elif resp != 0:
1735 raise util.Abort(_('the server sent an unknown error code'))
1738 raise util.Abort(_('the server sent an unknown error code'))
1736 self.ui.status(_('streaming all changes\n'))
1739 self.ui.status(_('streaming all changes\n'))
1737 l = fp.readline()
1740 l = fp.readline()
1738 try:
1741 try:
1739 total_files, total_bytes = map(int, l.split(' ', 1))
1742 total_files, total_bytes = map(int, l.split(' ', 1))
1740 except (ValueError, TypeError):
1743 except (ValueError, TypeError):
1741 raise error.ResponseError(
1744 raise error.ResponseError(
1742 _('unexpected response from remote server:'), l)
1745 _('unexpected response from remote server:'), l)
1743 self.ui.status(_('%d files to transfer, %s of data\n') %
1746 self.ui.status(_('%d files to transfer, %s of data\n') %
1744 (total_files, util.bytecount(total_bytes)))
1747 (total_files, util.bytecount(total_bytes)))
1745 handled_bytes = 0
1748 handled_bytes = 0
1746 self.ui.progress(_('clone'), 0, total=total_bytes)
1749 self.ui.progress(_('clone'), 0, total=total_bytes)
1747 start = time.time()
1750 start = time.time()
1748
1751
1749 tr = self.transaction(_('clone'))
1752 tr = self.transaction(_('clone'))
1750 try:
1753 try:
1751 for i in xrange(total_files):
1754 for i in xrange(total_files):
1752 # XXX doesn't support '\n' or '\r' in filenames
1755 # XXX doesn't support '\n' or '\r' in filenames
1753 l = fp.readline()
1756 l = fp.readline()
1754 try:
1757 try:
1755 name, size = l.split('\0', 1)
1758 name, size = l.split('\0', 1)
1756 size = int(size)
1759 size = int(size)
1757 except (ValueError, TypeError):
1760 except (ValueError, TypeError):
1758 raise error.ResponseError(
1761 raise error.ResponseError(
1759 _('unexpected response from remote server:'), l)
1762 _('unexpected response from remote server:'), l)
1760 if self.ui.debugflag:
1763 if self.ui.debugflag:
1761 self.ui.debug('adding %s (%s)\n' %
1764 self.ui.debug('adding %s (%s)\n' %
1762 (name, util.bytecount(size)))
1765 (name, util.bytecount(size)))
1763 # for backwards compat, name was partially encoded
1766 # for backwards compat, name was partially encoded
1764 ofp = self.svfs(store.decodedir(name), 'w')
1767 ofp = self.svfs(store.decodedir(name), 'w')
1765 for chunk in util.filechunkiter(fp, limit=size):
1768 for chunk in util.filechunkiter(fp, limit=size):
1766 handled_bytes += len(chunk)
1769 handled_bytes += len(chunk)
1767 self.ui.progress(_('clone'), handled_bytes,
1770 self.ui.progress(_('clone'), handled_bytes,
1768 total=total_bytes)
1771 total=total_bytes)
1769 ofp.write(chunk)
1772 ofp.write(chunk)
1770 ofp.close()
1773 ofp.close()
1771 tr.close()
1774 tr.close()
1772 finally:
1775 finally:
1773 tr.release()
1776 tr.release()
1774
1777
1775 # Writing straight to files circumvented the inmemory caches
1778 # Writing straight to files circumvented the inmemory caches
1776 self.invalidate()
1779 self.invalidate()
1777
1780
1778 elapsed = time.time() - start
1781 elapsed = time.time() - start
1779 if elapsed <= 0:
1782 if elapsed <= 0:
1780 elapsed = 0.001
1783 elapsed = 0.001
1781 self.ui.progress(_('clone'), None)
1784 self.ui.progress(_('clone'), None)
1782 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1785 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1783 (util.bytecount(total_bytes), elapsed,
1786 (util.bytecount(total_bytes), elapsed,
1784 util.bytecount(total_bytes / elapsed)))
1787 util.bytecount(total_bytes / elapsed)))
1785
1788
1786 # new requirements = old non-format requirements +
1789 # new requirements = old non-format requirements +
1787 # new format-related
1790 # new format-related
1788 # requirements from the streamed-in repository
1791 # requirements from the streamed-in repository
1789 requirements.update(set(self.requirements) - self.supportedformats)
1792 requirements.update(set(self.requirements) - self.supportedformats)
1790 self._applyrequirements(requirements)
1793 self._applyrequirements(requirements)
1791 self._writerequirements()
1794 self._writerequirements()
1792
1795
1793 if rbranchmap:
1796 if rbranchmap:
1794 rbheads = []
1797 rbheads = []
1795 closed = []
1798 closed = []
1796 for bheads in rbranchmap.itervalues():
1799 for bheads in rbranchmap.itervalues():
1797 rbheads.extend(bheads)
1800 rbheads.extend(bheads)
1798 for h in bheads:
1801 for h in bheads:
1799 r = self.changelog.rev(h)
1802 r = self.changelog.rev(h)
1800 b, c = self.changelog.branchinfo(r)
1803 b, c = self.changelog.branchinfo(r)
1801 if c:
1804 if c:
1802 closed.append(h)
1805 closed.append(h)
1803
1806
1804 if rbheads:
1807 if rbheads:
1805 rtiprev = max((int(self.changelog.rev(node))
1808 rtiprev = max((int(self.changelog.rev(node))
1806 for node in rbheads))
1809 for node in rbheads))
1807 cache = branchmap.branchcache(rbranchmap,
1810 cache = branchmap.branchcache(rbranchmap,
1808 self[rtiprev].node(),
1811 self[rtiprev].node(),
1809 rtiprev,
1812 rtiprev,
1810 closednodes=closed)
1813 closednodes=closed)
1811 # Try to stick it as low as possible
1814 # Try to stick it as low as possible
1812 # filter above served are unlikely to be fetch from a clone
1815 # filter above served are unlikely to be fetch from a clone
1813 for candidate in ('base', 'immutable', 'served'):
1816 for candidate in ('base', 'immutable', 'served'):
1814 rview = self.filtered(candidate)
1817 rview = self.filtered(candidate)
1815 if cache.validfor(rview):
1818 if cache.validfor(rview):
1816 self._branchcaches[candidate] = cache
1819 self._branchcaches[candidate] = cache
1817 cache.write(rview)
1820 cache.write(rview)
1818 break
1821 break
1819 self.invalidate()
1822 self.invalidate()
1820 return len(self.heads()) + 1
1823 return len(self.heads()) + 1
1821 finally:
1824 finally:
1822 lock.release()
1825 lock.release()
1823
1826
1824 def clone(self, remote, heads=[], stream=None):
1827 def clone(self, remote, heads=[], stream=None):
1825 '''clone remote repository.
1828 '''clone remote repository.
1826
1829
1827 keyword arguments:
1830 keyword arguments:
1828 heads: list of revs to clone (forces use of pull)
1831 heads: list of revs to clone (forces use of pull)
1829 stream: use streaming clone if possible'''
1832 stream: use streaming clone if possible'''
1830
1833
1831 # now, all clients that can request uncompressed clones can
1834 # now, all clients that can request uncompressed clones can
1832 # read repo formats supported by all servers that can serve
1835 # read repo formats supported by all servers that can serve
1833 # them.
1836 # them.
1834
1837
1835 # if revlog format changes, client will have to check version
1838 # if revlog format changes, client will have to check version
1836 # and format flags on "stream" capability, and use
1839 # and format flags on "stream" capability, and use
1837 # uncompressed only if compatible.
1840 # uncompressed only if compatible.
1838
1841
1839 if stream is None:
1842 if stream is None:
1840 # if the server explicitly prefers to stream (for fast LANs)
1843 # if the server explicitly prefers to stream (for fast LANs)
1841 stream = remote.capable('stream-preferred')
1844 stream = remote.capable('stream-preferred')
1842
1845
1843 if stream and not heads:
1846 if stream and not heads:
1844 # 'stream' means remote revlog format is revlogv1 only
1847 # 'stream' means remote revlog format is revlogv1 only
1845 if remote.capable('stream'):
1848 if remote.capable('stream'):
1846 self.stream_in(remote, set(('revlogv1',)))
1849 self.stream_in(remote, set(('revlogv1',)))
1847 else:
1850 else:
1848 # otherwise, 'streamreqs' contains the remote revlog format
1851 # otherwise, 'streamreqs' contains the remote revlog format
1849 streamreqs = remote.capable('streamreqs')
1852 streamreqs = remote.capable('streamreqs')
1850 if streamreqs:
1853 if streamreqs:
1851 streamreqs = set(streamreqs.split(','))
1854 streamreqs = set(streamreqs.split(','))
1852 # if we support it, stream in and adjust our requirements
1855 # if we support it, stream in and adjust our requirements
1853 if not streamreqs - self.supportedformats:
1856 if not streamreqs - self.supportedformats:
1854 self.stream_in(remote, streamreqs)
1857 self.stream_in(remote, streamreqs)
1855
1858
1856 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1859 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1857 try:
1860 try:
1858 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1861 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1859 ret = exchange.pull(self, remote, heads).cgresult
1862 ret = exchange.pull(self, remote, heads).cgresult
1860 finally:
1863 finally:
1861 self.ui.restoreconfig(quiet)
1864 self.ui.restoreconfig(quiet)
1862 return ret
1865 return ret
1863
1866
1864 def pushkey(self, namespace, key, old, new):
1867 def pushkey(self, namespace, key, old, new):
1865 try:
1868 try:
1866 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1869 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1867 old=old, new=new)
1870 old=old, new=new)
1868 except error.HookAbort, exc:
1871 except error.HookAbort, exc:
1869 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1872 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1870 if exc.hint:
1873 if exc.hint:
1871 self.ui.write_err(_("(%s)\n") % exc.hint)
1874 self.ui.write_err(_("(%s)\n") % exc.hint)
1872 return False
1875 return False
1873 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1876 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1874 ret = pushkey.push(self, namespace, key, old, new)
1877 ret = pushkey.push(self, namespace, key, old, new)
1875 def runhook():
1878 def runhook():
1876 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1879 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1877 ret=ret)
1880 ret=ret)
1878 self._afterlock(runhook)
1881 self._afterlock(runhook)
1879 return ret
1882 return ret
1880
1883
1881 def listkeys(self, namespace):
1884 def listkeys(self, namespace):
1882 self.hook('prelistkeys', throw=True, namespace=namespace)
1885 self.hook('prelistkeys', throw=True, namespace=namespace)
1883 self.ui.debug('listing keys for "%s"\n' % namespace)
1886 self.ui.debug('listing keys for "%s"\n' % namespace)
1884 values = pushkey.list(self, namespace)
1887 values = pushkey.list(self, namespace)
1885 self.hook('listkeys', namespace=namespace, values=values)
1888 self.hook('listkeys', namespace=namespace, values=values)
1886 return values
1889 return values
1887
1890
1888 def debugwireargs(self, one, two, three=None, four=None, five=None):
1891 def debugwireargs(self, one, two, three=None, four=None, five=None):
1889 '''used to test argument passing over the wire'''
1892 '''used to test argument passing over the wire'''
1890 return "%s %s %s %s %s" % (one, two, three, four, five)
1893 return "%s %s %s %s %s" % (one, two, three, four, five)
1891
1894
1892 def savecommitmessage(self, text):
1895 def savecommitmessage(self, text):
1893 fp = self.vfs('last-message.txt', 'wb')
1896 fp = self.vfs('last-message.txt', 'wb')
1894 try:
1897 try:
1895 fp.write(text)
1898 fp.write(text)
1896 finally:
1899 finally:
1897 fp.close()
1900 fp.close()
1898 return self.pathto(fp.name[len(self.root) + 1:])
1901 return self.pathto(fp.name[len(self.root) + 1:])
1899
1902
1900 # used to avoid circular references so destructors work
1903 # used to avoid circular references so destructors work
1901 def aftertrans(files):
1904 def aftertrans(files):
1902 renamefiles = [tuple(t) for t in files]
1905 renamefiles = [tuple(t) for t in files]
1903 def a():
1906 def a():
1904 for vfs, src, dest in renamefiles:
1907 for vfs, src, dest in renamefiles:
1905 try:
1908 try:
1906 vfs.rename(src, dest)
1909 vfs.rename(src, dest)
1907 except OSError: # journal file does not yet exist
1910 except OSError: # journal file does not yet exist
1908 pass
1911 pass
1909 return a
1912 return a
1910
1913
1911 def undoname(fn):
1914 def undoname(fn):
1912 base, name = os.path.split(fn)
1915 base, name = os.path.split(fn)
1913 assert name.startswith('journal')
1916 assert name.startswith('journal')
1914 return os.path.join(base, name.replace('journal', 'undo', 1))
1917 return os.path.join(base, name.replace('journal', 'undo', 1))
1915
1918
1916 def instance(ui, path, create):
1919 def instance(ui, path, create):
1917 return localrepository(ui, util.urllocalpath(path), create)
1920 return localrepository(ui, util.urllocalpath(path), create)
1918
1921
1919 def islocal(path):
1922 def islocal(path):
1920 return True
1923 return True
@@ -1,628 +1,636
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import mdiff, parsers, error, revlog, util, scmutil
9 import mdiff, parsers, error, revlog, util, scmutil
10 import array, struct
10 import array, struct
11
11
12 propertycache = util.propertycache
12 propertycache = util.propertycache
13
13
14 class _lazymanifest(dict):
14 class _lazymanifest(dict):
15 """This is the pure implementation of lazymanifest.
15 """This is the pure implementation of lazymanifest.
16
16
17 It has not been optimized *at all* and is not lazy.
17 It has not been optimized *at all* and is not lazy.
18 """
18 """
19
19
20 def __init__(self, data):
20 def __init__(self, data):
21 # This init method does a little bit of excessive-looking
21 # This init method does a little bit of excessive-looking
22 # precondition checking. This is so that the behavior of this
22 # precondition checking. This is so that the behavior of this
23 # class exactly matches its C counterpart to try and help
23 # class exactly matches its C counterpart to try and help
24 # prevent surprise breakage for anyone that develops against
24 # prevent surprise breakage for anyone that develops against
25 # the pure version.
25 # the pure version.
26 if data and data[-1] != '\n':
26 if data and data[-1] != '\n':
27 raise ValueError('Manifest did not end in a newline.')
27 raise ValueError('Manifest did not end in a newline.')
28 dict.__init__(self)
28 dict.__init__(self)
29 prev = None
29 prev = None
30 for l in data.splitlines():
30 for l in data.splitlines():
31 if prev is not None and prev > l:
31 if prev is not None and prev > l:
32 raise ValueError('Manifest lines not in sorted order.')
32 raise ValueError('Manifest lines not in sorted order.')
33 prev = l
33 prev = l
34 f, n = l.split('\0')
34 f, n = l.split('\0')
35 if len(n) > 40:
35 if len(n) > 40:
36 self[f] = revlog.bin(n[:40]), n[40:]
36 self[f] = revlog.bin(n[:40]), n[40:]
37 else:
37 else:
38 self[f] = revlog.bin(n), ''
38 self[f] = revlog.bin(n), ''
39
39
40 def __setitem__(self, k, v):
40 def __setitem__(self, k, v):
41 node, flag = v
41 node, flag = v
42 assert node is not None
42 assert node is not None
43 if len(node) > 21:
43 if len(node) > 21:
44 node = node[:21] # match c implementation behavior
44 node = node[:21] # match c implementation behavior
45 dict.__setitem__(self, k, (node, flag))
45 dict.__setitem__(self, k, (node, flag))
46
46
47 def __iter__(self):
47 def __iter__(self):
48 return iter(sorted(dict.keys(self)))
48 return iter(sorted(dict.keys(self)))
49
49
50 def iterkeys(self):
50 def iterkeys(self):
51 return iter(sorted(dict.keys(self)))
51 return iter(sorted(dict.keys(self)))
52
52
53 def iterentries(self):
53 def iterentries(self):
54 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
54 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
55
55
56 def copy(self):
56 def copy(self):
57 c = _lazymanifest('')
57 c = _lazymanifest('')
58 c.update(self)
58 c.update(self)
59 return c
59 return c
60
60
61 def diff(self, m2, clean=False):
61 def diff(self, m2, clean=False):
62 '''Finds changes between the current manifest and m2.'''
62 '''Finds changes between the current manifest and m2.'''
63 diff = {}
63 diff = {}
64
64
65 for fn, e1 in self.iteritems():
65 for fn, e1 in self.iteritems():
66 if fn not in m2:
66 if fn not in m2:
67 diff[fn] = e1, (None, '')
67 diff[fn] = e1, (None, '')
68 else:
68 else:
69 e2 = m2[fn]
69 e2 = m2[fn]
70 if e1 != e2:
70 if e1 != e2:
71 diff[fn] = e1, e2
71 diff[fn] = e1, e2
72 elif clean:
72 elif clean:
73 diff[fn] = None
73 diff[fn] = None
74
74
75 for fn, e2 in m2.iteritems():
75 for fn, e2 in m2.iteritems():
76 if fn not in self:
76 if fn not in self:
77 diff[fn] = (None, ''), e2
77 diff[fn] = (None, ''), e2
78
78
79 return diff
79 return diff
80
80
81 def filtercopy(self, filterfn):
81 def filtercopy(self, filterfn):
82 c = _lazymanifest('')
82 c = _lazymanifest('')
83 for f, n, fl in self.iterentries():
83 for f, n, fl in self.iterentries():
84 if filterfn(f):
84 if filterfn(f):
85 c[f] = n, fl
85 c[f] = n, fl
86 return c
86 return c
87
87
88 def text(self):
88 def text(self):
89 """Get the full data of this manifest as a bytestring."""
89 """Get the full data of this manifest as a bytestring."""
90 fl = sorted(self.iterentries())
90 fl = sorted(self.iterentries())
91
91
92 _hex = revlog.hex
92 _hex = revlog.hex
93 # if this is changed to support newlines in filenames,
93 # if this is changed to support newlines in filenames,
94 # be sure to check the templates/ dir again (especially *-raw.tmpl)
94 # be sure to check the templates/ dir again (especially *-raw.tmpl)
95 return ''.join("%s\0%s%s\n" % (
95 return ''.join("%s\0%s%s\n" % (
96 f, _hex(n[:20]), flag) for f, n, flag in fl)
96 f, _hex(n[:20]), flag) for f, n, flag in fl)
97
97
98 try:
98 try:
99 _lazymanifest = parsers.lazymanifest
99 _lazymanifest = parsers.lazymanifest
100 except AttributeError:
100 except AttributeError:
101 pass
101 pass
102
102
103 class manifestdict(object):
103 class manifestdict(object):
104 def __init__(self, data=''):
104 def __init__(self, data=''):
105 self._lm = _lazymanifest(data)
105 self._lm = _lazymanifest(data)
106
106
107 def __getitem__(self, key):
107 def __getitem__(self, key):
108 return self._lm[key][0]
108 return self._lm[key][0]
109
109
110 def find(self, key):
110 def find(self, key):
111 return self._lm[key]
111 return self._lm[key]
112
112
113 def __len__(self):
113 def __len__(self):
114 return len(self._lm)
114 return len(self._lm)
115
115
116 def __setitem__(self, key, node):
116 def __setitem__(self, key, node):
117 self._lm[key] = node, self.flags(key, '')
117 self._lm[key] = node, self.flags(key, '')
118
118
119 def __contains__(self, key):
119 def __contains__(self, key):
120 return key in self._lm
120 return key in self._lm
121
121
122 def __delitem__(self, key):
122 def __delitem__(self, key):
123 del self._lm[key]
123 del self._lm[key]
124
124
125 def __iter__(self):
125 def __iter__(self):
126 return self._lm.__iter__()
126 return self._lm.__iter__()
127
127
128 def iterkeys(self):
128 def iterkeys(self):
129 return self._lm.iterkeys()
129 return self._lm.iterkeys()
130
130
131 def keys(self):
131 def keys(self):
132 return list(self.iterkeys())
132 return list(self.iterkeys())
133
133
134 def intersectfiles(self, files):
134 def intersectfiles(self, files):
135 '''make a new lazymanifest with the intersection of self with files
135 '''make a new lazymanifest with the intersection of self with files
136
136
137 The algorithm assumes that files is much smaller than self.'''
137 The algorithm assumes that files is much smaller than self.'''
138 ret = manifestdict()
138 ret = manifestdict()
139 lm = self._lm
139 lm = self._lm
140 for fn in files:
140 for fn in files:
141 if fn in lm:
141 if fn in lm:
142 ret._lm[fn] = self._lm[fn]
142 ret._lm[fn] = self._lm[fn]
143 return ret
143 return ret
144
144
145 def filesnotin(self, m2):
145 def filesnotin(self, m2):
146 '''Set of files in this manifest that are not in the other'''
146 '''Set of files in this manifest that are not in the other'''
147 files = set(self)
147 files = set(self)
148 files.difference_update(m2)
148 files.difference_update(m2)
149 return files
149 return files
150
150
151 @propertycache
151 @propertycache
152 def _dirs(self):
152 def _dirs(self):
153 return scmutil.dirs(self)
153 return scmutil.dirs(self)
154
154
155 def dirs(self):
155 def dirs(self):
156 return self._dirs
156 return self._dirs
157
157
158 def hasdir(self, dir):
158 def hasdir(self, dir):
159 return dir in self._dirs
159 return dir in self._dirs
160
160
161 def matches(self, match):
161 def matches(self, match):
162 '''generate a new manifest filtered by the match argument'''
162 '''generate a new manifest filtered by the match argument'''
163 if match.always():
163 if match.always():
164 return self.copy()
164 return self.copy()
165
165
166 files = match.files()
166 files = match.files()
167 if (len(files) < 100 and (match.matchfn == match.exact or
167 if (len(files) < 100 and (match.matchfn == match.exact or
168 (not match.anypats() and util.all(fn in self for fn in files)))):
168 (not match.anypats() and util.all(fn in self for fn in files)))):
169 return self.intersectfiles(files)
169 return self.intersectfiles(files)
170
170
171 lm = manifestdict('')
171 lm = manifestdict('')
172 lm._lm = self._lm.filtercopy(match)
172 lm._lm = self._lm.filtercopy(match)
173 return lm
173 return lm
174
174
175 def diff(self, m2, clean=False):
175 def diff(self, m2, clean=False):
176 '''Finds changes between the current manifest and m2.
176 '''Finds changes between the current manifest and m2.
177
177
178 Args:
178 Args:
179 m2: the manifest to which this manifest should be compared.
179 m2: the manifest to which this manifest should be compared.
180 clean: if true, include files unchanged between these manifests
180 clean: if true, include files unchanged between these manifests
181 with a None value in the returned dictionary.
181 with a None value in the returned dictionary.
182
182
183 The result is returned as a dict with filename as key and
183 The result is returned as a dict with filename as key and
184 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
184 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
185 nodeid in the current/other manifest and fl1/fl2 is the flag
185 nodeid in the current/other manifest and fl1/fl2 is the flag
186 in the current/other manifest. Where the file does not exist,
186 in the current/other manifest. Where the file does not exist,
187 the nodeid will be None and the flags will be the empty
187 the nodeid will be None and the flags will be the empty
188 string.
188 string.
189 '''
189 '''
190 return self._lm.diff(m2._lm, clean)
190 return self._lm.diff(m2._lm, clean)
191
191
192 def setflag(self, key, flag):
192 def setflag(self, key, flag):
193 self._lm[key] = self[key], flag
193 self._lm[key] = self[key], flag
194
194
195 def get(self, key, default=None):
195 def get(self, key, default=None):
196 try:
196 try:
197 return self._lm[key][0]
197 return self._lm[key][0]
198 except KeyError:
198 except KeyError:
199 return default
199 return default
200
200
201 def flags(self, key, default=''):
201 def flags(self, key, default=''):
202 try:
202 try:
203 return self._lm[key][1]
203 return self._lm[key][1]
204 except KeyError:
204 except KeyError:
205 return default
205 return default
206
206
207 def copy(self):
207 def copy(self):
208 c = manifestdict('')
208 c = manifestdict('')
209 c._lm = self._lm.copy()
209 c._lm = self._lm.copy()
210 return c
210 return c
211
211
212 def iteritems(self):
212 def iteritems(self):
213 return (x[:2] for x in self._lm.iterentries())
213 return (x[:2] for x in self._lm.iterentries())
214
214
215 def text(self):
215 def text(self):
216 return self._lm.text()
216 return self._lm.text()
217
217
218 def fastdelta(self, base, changes):
218 def fastdelta(self, base, changes):
219 """Given a base manifest text as an array.array and a list of changes
219 """Given a base manifest text as an array.array and a list of changes
220 relative to that text, compute a delta that can be used by revlog.
220 relative to that text, compute a delta that can be used by revlog.
221 """
221 """
222 delta = []
222 delta = []
223 dstart = None
223 dstart = None
224 dend = None
224 dend = None
225 dline = [""]
225 dline = [""]
226 start = 0
226 start = 0
227 # zero copy representation of base as a buffer
227 # zero copy representation of base as a buffer
228 addbuf = util.buffer(base)
228 addbuf = util.buffer(base)
229
229
230 # start with a readonly loop that finds the offset of
230 # start with a readonly loop that finds the offset of
231 # each line and creates the deltas
231 # each line and creates the deltas
232 for f, todelete in changes:
232 for f, todelete in changes:
233 # bs will either be the index of the item or the insert point
233 # bs will either be the index of the item or the insert point
234 start, end = _msearch(addbuf, f, start)
234 start, end = _msearch(addbuf, f, start)
235 if not todelete:
235 if not todelete:
236 h, fl = self._lm[f]
236 h, fl = self._lm[f]
237 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
237 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
238 else:
238 else:
239 if start == end:
239 if start == end:
240 # item we want to delete was not found, error out
240 # item we want to delete was not found, error out
241 raise AssertionError(
241 raise AssertionError(
242 _("failed to remove %s from manifest") % f)
242 _("failed to remove %s from manifest") % f)
243 l = ""
243 l = ""
244 if dstart is not None and dstart <= start and dend >= start:
244 if dstart is not None and dstart <= start and dend >= start:
245 if dend < end:
245 if dend < end:
246 dend = end
246 dend = end
247 if l:
247 if l:
248 dline.append(l)
248 dline.append(l)
249 else:
249 else:
250 if dstart is not None:
250 if dstart is not None:
251 delta.append([dstart, dend, "".join(dline)])
251 delta.append([dstart, dend, "".join(dline)])
252 dstart = start
252 dstart = start
253 dend = end
253 dend = end
254 dline = [l]
254 dline = [l]
255
255
256 if dstart is not None:
256 if dstart is not None:
257 delta.append([dstart, dend, "".join(dline)])
257 delta.append([dstart, dend, "".join(dline)])
258 # apply the delta to the base, and get a delta for addrevision
258 # apply the delta to the base, and get a delta for addrevision
259 deltatext, arraytext = _addlistdelta(base, delta)
259 deltatext, arraytext = _addlistdelta(base, delta)
260 return arraytext, deltatext
260 return arraytext, deltatext
261
261
262 def _msearch(m, s, lo=0, hi=None):
262 def _msearch(m, s, lo=0, hi=None):
263 '''return a tuple (start, end) that says where to find s within m.
263 '''return a tuple (start, end) that says where to find s within m.
264
264
265 If the string is found m[start:end] are the line containing
265 If the string is found m[start:end] are the line containing
266 that string. If start == end the string was not found and
266 that string. If start == end the string was not found and
267 they indicate the proper sorted insertion point.
267 they indicate the proper sorted insertion point.
268
268
269 m should be a buffer or a string
269 m should be a buffer or a string
270 s is a string'''
270 s is a string'''
271 def advance(i, c):
271 def advance(i, c):
272 while i < lenm and m[i] != c:
272 while i < lenm and m[i] != c:
273 i += 1
273 i += 1
274 return i
274 return i
275 if not s:
275 if not s:
276 return (lo, lo)
276 return (lo, lo)
277 lenm = len(m)
277 lenm = len(m)
278 if not hi:
278 if not hi:
279 hi = lenm
279 hi = lenm
280 while lo < hi:
280 while lo < hi:
281 mid = (lo + hi) // 2
281 mid = (lo + hi) // 2
282 start = mid
282 start = mid
283 while start > 0 and m[start - 1] != '\n':
283 while start > 0 and m[start - 1] != '\n':
284 start -= 1
284 start -= 1
285 end = advance(start, '\0')
285 end = advance(start, '\0')
286 if m[start:end] < s:
286 if m[start:end] < s:
287 # we know that after the null there are 40 bytes of sha1
287 # we know that after the null there are 40 bytes of sha1
288 # this translates to the bisect lo = mid + 1
288 # this translates to the bisect lo = mid + 1
289 lo = advance(end + 40, '\n') + 1
289 lo = advance(end + 40, '\n') + 1
290 else:
290 else:
291 # this translates to the bisect hi = mid
291 # this translates to the bisect hi = mid
292 hi = start
292 hi = start
293 end = advance(lo, '\0')
293 end = advance(lo, '\0')
294 found = m[lo:end]
294 found = m[lo:end]
295 if s == found:
295 if s == found:
296 # we know that after the null there are 40 bytes of sha1
296 # we know that after the null there are 40 bytes of sha1
297 end = advance(end + 40, '\n')
297 end = advance(end + 40, '\n')
298 return (lo, end + 1)
298 return (lo, end + 1)
299 else:
299 else:
300 return (lo, lo)
300 return (lo, lo)
301
301
302 def _checkforbidden(l):
302 def _checkforbidden(l):
303 """Check filenames for illegal characters."""
303 """Check filenames for illegal characters."""
304 for f in l:
304 for f in l:
305 if '\n' in f or '\r' in f:
305 if '\n' in f or '\r' in f:
306 raise error.RevlogError(
306 raise error.RevlogError(
307 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
307 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
308
308
309
309
310 # apply the changes collected during the bisect loop to our addlist
310 # apply the changes collected during the bisect loop to our addlist
311 # return a delta suitable for addrevision
311 # return a delta suitable for addrevision
312 def _addlistdelta(addlist, x):
312 def _addlistdelta(addlist, x):
313 # for large addlist arrays, building a new array is cheaper
313 # for large addlist arrays, building a new array is cheaper
314 # than repeatedly modifying the existing one
314 # than repeatedly modifying the existing one
315 currentposition = 0
315 currentposition = 0
316 newaddlist = array.array('c')
316 newaddlist = array.array('c')
317
317
318 for start, end, content in x:
318 for start, end, content in x:
319 newaddlist += addlist[currentposition:start]
319 newaddlist += addlist[currentposition:start]
320 if content:
320 if content:
321 newaddlist += array.array('c', content)
321 newaddlist += array.array('c', content)
322
322
323 currentposition = end
323 currentposition = end
324
324
325 newaddlist += addlist[currentposition:]
325 newaddlist += addlist[currentposition:]
326
326
327 deltatext = "".join(struct.pack(">lll", start, end, len(content))
327 deltatext = "".join(struct.pack(">lll", start, end, len(content))
328 + content for start, end, content in x)
328 + content for start, end, content in x)
329 return deltatext, newaddlist
329 return deltatext, newaddlist
330
330
331 def _splittopdir(f):
331 def _splittopdir(f):
332 if '/' in f:
332 if '/' in f:
333 dir, subpath = f.split('/', 1)
333 dir, subpath = f.split('/', 1)
334 return dir + '/', subpath
334 return dir + '/', subpath
335 else:
335 else:
336 return '', f
336 return '', f
337
337
338 class treemanifest(object):
338 class treemanifest(object):
339 def __init__(self, text=''):
339 def __init__(self, text=''):
340 self._dirs = {}
340 self._dirs = {}
341 # Using _lazymanifest here is a little slower than plain old dicts
341 # Using _lazymanifest here is a little slower than plain old dicts
342 self._files = {}
342 self._files = {}
343 self._flags = {}
343 self._flags = {}
344 lm = _lazymanifest(text)
344 lm = _lazymanifest(text)
345 for f, n, fl in lm.iterentries():
345 for f, n, fl in lm.iterentries():
346 self[f] = n
346 self[f] = n
347 if fl:
347 if fl:
348 self.setflag(f, fl)
348 self.setflag(f, fl)
349
349
350 def __len__(self):
350 def __len__(self):
351 size = len(self._files)
351 size = len(self._files)
352 for m in self._dirs.values():
352 for m in self._dirs.values():
353 size += m.__len__()
353 size += m.__len__()
354 return size
354 return size
355
355
356 def iteritems(self):
356 def iteritems(self):
357 for p, n in sorted(self._dirs.items() + self._files.items()):
357 for p, n in sorted(self._dirs.items() + self._files.items()):
358 if p in self._files:
358 if p in self._files:
359 yield p, n
359 yield p, n
360 else:
360 else:
361 for sf, sn in n.iteritems():
361 for sf, sn in n.iteritems():
362 yield p + sf, sn
362 yield p + sf, sn
363
363
364 def iterkeys(self):
364 def iterkeys(self):
365 for p in sorted(self._dirs.keys() + self._files.keys()):
365 for p in sorted(self._dirs.keys() + self._files.keys()):
366 if p in self._files:
366 if p in self._files:
367 yield p
367 yield p
368 else:
368 else:
369 for f in self._dirs[p].iterkeys():
369 for f in self._dirs[p].iterkeys():
370 yield p + f
370 yield p + f
371
371
372 def keys(self):
372 def keys(self):
373 return list(self.iterkeys())
373 return list(self.iterkeys())
374
374
375 def __iter__(self):
375 def __iter__(self):
376 return self.iterkeys()
376 return self.iterkeys()
377
377
378 def __contains__(self, f):
378 def __contains__(self, f):
379 if f is None:
379 if f is None:
380 return False
380 return False
381 dir, subpath = _splittopdir(f)
381 dir, subpath = _splittopdir(f)
382 if dir:
382 if dir:
383 if dir not in self._dirs:
383 if dir not in self._dirs:
384 return False
384 return False
385 return self._dirs[dir].__contains__(subpath)
385 return self._dirs[dir].__contains__(subpath)
386 else:
386 else:
387 return f in self._files
387 return f in self._files
388
388
389 def get(self, f, default=None):
389 def get(self, f, default=None):
390 dir, subpath = _splittopdir(f)
390 dir, subpath = _splittopdir(f)
391 if dir:
391 if dir:
392 if dir not in self._dirs:
392 if dir not in self._dirs:
393 return default
393 return default
394 return self._dirs[dir].get(subpath, default)
394 return self._dirs[dir].get(subpath, default)
395 else:
395 else:
396 return self._files.get(f, default)
396 return self._files.get(f, default)
397
397
398 def __getitem__(self, f):
398 def __getitem__(self, f):
399 dir, subpath = _splittopdir(f)
399 dir, subpath = _splittopdir(f)
400 if dir:
400 if dir:
401 return self._dirs[dir].__getitem__(subpath)
401 return self._dirs[dir].__getitem__(subpath)
402 else:
402 else:
403 return self._files[f]
403 return self._files[f]
404
404
405 def flags(self, f):
405 def flags(self, f):
406 dir, subpath = _splittopdir(f)
406 dir, subpath = _splittopdir(f)
407 if dir:
407 if dir:
408 if dir not in self._dirs:
408 if dir not in self._dirs:
409 return ''
409 return ''
410 return self._dirs[dir].flags(subpath)
410 return self._dirs[dir].flags(subpath)
411 else:
411 else:
412 if f in self._dirs:
412 if f in self._dirs:
413 return ''
413 return ''
414 return self._flags.get(f, '')
414 return self._flags.get(f, '')
415
415
416 def find(self, f):
416 def find(self, f):
417 dir, subpath = _splittopdir(f)
417 dir, subpath = _splittopdir(f)
418 if dir:
418 if dir:
419 return self._dirs[dir].find(subpath)
419 return self._dirs[dir].find(subpath)
420 else:
420 else:
421 return self._files[f], self._flags.get(f, '')
421 return self._files[f], self._flags.get(f, '')
422
422
423 def __delitem__(self, f):
423 def __delitem__(self, f):
424 dir, subpath = _splittopdir(f)
424 dir, subpath = _splittopdir(f)
425 if dir:
425 if dir:
426 self._dirs[dir].__delitem__(subpath)
426 self._dirs[dir].__delitem__(subpath)
427 # If the directory is now empty, remove it
427 # If the directory is now empty, remove it
428 if not self._dirs[dir]._dirs and not self._dirs[dir]._files:
428 if not self._dirs[dir]._dirs and not self._dirs[dir]._files:
429 del self._dirs[dir]
429 del self._dirs[dir]
430 else:
430 else:
431 del self._files[f]
431 del self._files[f]
432 if f in self._flags:
432 if f in self._flags:
433 del self._flags[f]
433 del self._flags[f]
434
434
435 def __setitem__(self, f, n):
435 def __setitem__(self, f, n):
436 assert n is not None
436 assert n is not None
437 dir, subpath = _splittopdir(f)
437 dir, subpath = _splittopdir(f)
438 if dir:
438 if dir:
439 if dir not in self._dirs:
439 if dir not in self._dirs:
440 self._dirs[dir] = treemanifest()
440 self._dirs[dir] = treemanifest()
441 self._dirs[dir].__setitem__(subpath, n)
441 self._dirs[dir].__setitem__(subpath, n)
442 else:
442 else:
443 self._files[f] = n
443 self._files[f] = n
444
444
445 def setflag(self, f, flags):
445 def setflag(self, f, flags):
446 """Set the flags (symlink, executable) for path f."""
446 """Set the flags (symlink, executable) for path f."""
447 dir, subpath = _splittopdir(f)
447 dir, subpath = _splittopdir(f)
448 if dir:
448 if dir:
449 if dir not in self._dirs:
449 if dir not in self._dirs:
450 self._dirs[dir] = treemanifest()
450 self._dirs[dir] = treemanifest()
451 self._dirs[dir].setflag(subpath, flags)
451 self._dirs[dir].setflag(subpath, flags)
452 else:
452 else:
453 self._flags[f] = flags
453 self._flags[f] = flags
454
454
455 def copy(self):
455 def copy(self):
456 copy = treemanifest()
456 copy = treemanifest()
457 for d in self._dirs:
457 for d in self._dirs:
458 copy._dirs[d] = self._dirs[d].copy()
458 copy._dirs[d] = self._dirs[d].copy()
459 copy._files = dict.copy(self._files)
459 copy._files = dict.copy(self._files)
460 copy._flags = dict.copy(self._flags)
460 copy._flags = dict.copy(self._flags)
461 return copy
461 return copy
462
462
463 def intersectfiles(self, files):
463 def intersectfiles(self, files):
464 '''make a new treemanifest with the intersection of self with files
464 '''make a new treemanifest with the intersection of self with files
465
465
466 The algorithm assumes that files is much smaller than self.'''
466 The algorithm assumes that files is much smaller than self.'''
467 ret = treemanifest()
467 ret = treemanifest()
468 for fn in files:
468 for fn in files:
469 if fn in self:
469 if fn in self:
470 ret[fn] = self[fn]
470 ret[fn] = self[fn]
471 flags = self.flags(fn)
471 flags = self.flags(fn)
472 if flags:
472 if flags:
473 ret.setflag(fn, flags)
473 ret.setflag(fn, flags)
474 return ret
474 return ret
475
475
476 def filesnotin(self, m2):
476 def filesnotin(self, m2):
477 '''Set of files in this manifest that are not in the other'''
477 '''Set of files in this manifest that are not in the other'''
478 files = set(self.iterkeys())
478 files = set(self.iterkeys())
479 files.difference_update(m2.iterkeys())
479 files.difference_update(m2.iterkeys())
480 return files
480 return files
481
481
482 @propertycache
482 @propertycache
483 def _alldirs(self):
483 def _alldirs(self):
484 return scmutil.dirs(self)
484 return scmutil.dirs(self)
485
485
486 def dirs(self):
486 def dirs(self):
487 return self._alldirs
487 return self._alldirs
488
488
489 def hasdir(self, dir):
489 def hasdir(self, dir):
490 return dir in self._alldirs
490 return dir in self._alldirs
491
491
492 def matches(self, match):
492 def matches(self, match):
493 '''generate a new manifest filtered by the match argument'''
493 '''generate a new manifest filtered by the match argument'''
494 if match.always():
494 if match.always():
495 return self.copy()
495 return self.copy()
496
496
497 files = match.files()
497 files = match.files()
498 if (match.matchfn == match.exact or
498 if (match.matchfn == match.exact or
499 (not match.anypats() and util.all(fn in self for fn in files))):
499 (not match.anypats() and util.all(fn in self for fn in files))):
500 return self.intersectfiles(files)
500 return self.intersectfiles(files)
501
501
502 m = self.copy()
502 m = self.copy()
503 for fn in m.keys():
503 for fn in m.keys():
504 if not match(fn):
504 if not match(fn):
505 del m[fn]
505 del m[fn]
506 return m
506 return m
507
507
508 def diff(self, m2, clean=False):
508 def diff(self, m2, clean=False):
509 '''Finds changes between the current manifest and m2.
509 '''Finds changes between the current manifest and m2.
510
510
511 Args:
511 Args:
512 m2: the manifest to which this manifest should be compared.
512 m2: the manifest to which this manifest should be compared.
513 clean: if true, include files unchanged between these manifests
513 clean: if true, include files unchanged between these manifests
514 with a None value in the returned dictionary.
514 with a None value in the returned dictionary.
515
515
516 The result is returned as a dict with filename as key and
516 The result is returned as a dict with filename as key and
517 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
517 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
518 nodeid in the current/other manifest and fl1/fl2 is the flag
518 nodeid in the current/other manifest and fl1/fl2 is the flag
519 in the current/other manifest. Where the file does not exist,
519 in the current/other manifest. Where the file does not exist,
520 the nodeid will be None and the flags will be the empty
520 the nodeid will be None and the flags will be the empty
521 string.
521 string.
522 '''
522 '''
523 diff = {}
523 diff = {}
524
524
525 for fn, n1 in self.iteritems():
525 for fn, n1 in self.iteritems():
526 fl1 = self.flags(fn)
526 fl1 = self.flags(fn)
527 n2 = m2.get(fn, None)
527 n2 = m2.get(fn, None)
528 fl2 = m2.flags(fn)
528 fl2 = m2.flags(fn)
529 if n2 is None:
529 if n2 is None:
530 fl2 = ''
530 fl2 = ''
531 if n1 != n2 or fl1 != fl2:
531 if n1 != n2 or fl1 != fl2:
532 diff[fn] = ((n1, fl1), (n2, fl2))
532 diff[fn] = ((n1, fl1), (n2, fl2))
533 elif clean:
533 elif clean:
534 diff[fn] = None
534 diff[fn] = None
535
535
536 for fn, n2 in m2.iteritems():
536 for fn, n2 in m2.iteritems():
537 if fn not in self:
537 if fn not in self:
538 fl2 = m2.flags(fn)
538 fl2 = m2.flags(fn)
539 diff[fn] = ((None, ''), (n2, fl2))
539 diff[fn] = ((None, ''), (n2, fl2))
540
540
541 return diff
541 return diff
542
542
543 def text(self):
543 def text(self):
544 """Get the full data of this manifest as a bytestring."""
544 """Get the full data of this manifest as a bytestring."""
545 fl = self.keys()
545 fl = self.keys()
546 _checkforbidden(fl)
546 _checkforbidden(fl)
547
547
548 hex, flags = revlog.hex, self.flags
548 hex, flags = revlog.hex, self.flags
549 # if this is changed to support newlines in filenames,
549 # if this is changed to support newlines in filenames,
550 # be sure to check the templates/ dir again (especially *-raw.tmpl)
550 # be sure to check the templates/ dir again (especially *-raw.tmpl)
551 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
551 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
552
552
553 class manifest(revlog.revlog):
553 class manifest(revlog.revlog):
554 def __init__(self, opener):
554 def __init__(self, opener):
555 # During normal operations, we expect to deal with not more than four
555 # During normal operations, we expect to deal with not more than four
556 # revs at a time (such as during commit --amend). When rebasing large
556 # revs at a time (such as during commit --amend). When rebasing large
557 # stacks of commits, the number can go up, hence the config knob below.
557 # stacks of commits, the number can go up, hence the config knob below.
558 cachesize = 4
558 cachesize = 4
559 usetreemanifest = False
559 opts = getattr(opener, 'options', None)
560 opts = getattr(opener, 'options', None)
560 if opts is not None:
561 if opts is not None:
561 cachesize = opts.get('manifestcachesize', cachesize)
562 cachesize = opts.get('manifestcachesize', cachesize)
563 usetreemanifest = opts.get('usetreemanifest', usetreemanifest)
562 self._mancache = util.lrucachedict(cachesize)
564 self._mancache = util.lrucachedict(cachesize)
563 revlog.revlog.__init__(self, opener, "00manifest.i")
565 revlog.revlog.__init__(self, opener, "00manifest.i")
566 self._usetreemanifest = usetreemanifest
567
568 def _newmanifest(self, data=''):
569 if self._usetreemanifest:
570 return treemanifest(data)
571 return manifestdict(data)
564
572
565 def readdelta(self, node):
573 def readdelta(self, node):
566 r = self.rev(node)
574 r = self.rev(node)
567 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
575 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
568 return manifestdict(d)
576 return self._newmanifest(d)
569
577
def readfast(self, node):
    '''use the faster of readdelta or read'''
    rev = self.rev(node)
    basis = self.deltaparent(rev)
    # A delta stored against one of this revision's own parents is cheap
    # to apply directly; anything else (or a full snapshot) is served
    # faster by a plain read.
    if basis != revlog.nullrev and basis in self.parentrevs(rev):
        return self.readdelta(node)
    return self.read(node)
def read(self, node):
    # The null revision has no stored text; hand back a fresh empty
    # manifest rather than polluting the LRU cache with it.
    if node == revlog.nullid:
        return self._newmanifest()
    if node in self._mancache:
        return self._mancache[node][0]
    text = self.revision(node)
    mapping = self._newmanifest(text)
    # Keep the raw text alongside the parsed object so add() can later
    # compute a fastdelta against it without re-reading the revision.
    arraytext = array.array('c', text)
    self._mancache[node] = (mapping, arraytext)
    return mapping
def find(self, node, f):
    '''look up entry for a single file efficiently.
    return (node, flags) pair if found, (None, None) if not.'''
    # Note: self.read is deliberately outside the try block -- in
    # Mercurial, lookup errors for an unknown node subclass KeyError,
    # and those must propagate to the caller rather than be reported
    # as "file not present in this manifest".
    m = self.read(node)
    try:
        return m.find(f)
    except KeyError:
        return None, None
def add(self, m, transaction, link, p1, p2, added, removed):
    # Store manifest 'm' as a new revision with parents p1/p2 linked to
    # changelog rev 'link'; 'added'/'removed' are the file lists changed
    # relative to p1.  Returns the new manifest node.
    #
    # The fastdelta path is skipped for tree manifests -- presumably
    # because fastdelta operates on the flat array-backed text cached in
    # _mancache; TODO confirm against treemanifest's implementation.
    if p1 in self._mancache and not self._usetreemanifest:
        # If our first parent is in the manifest cache, we can
        # compute a delta here using properties we know about the
        # manifest up-front, which may save time later for the
        # revlog layer.

        _checkforbidden(added)
        # combine the changed lists into one list for sorting
        work = [(x, False) for x in added]
        work.extend((x, True) for x in removed)
        # this could use heapq.merge() (from Python 2.6+) or equivalent
        # since the lists are already sorted
        work.sort()

        # fastdelta returns both the new full text (as an array) and the
        # delta against p1's cached text, so the revlog can store the
        # delta without recomputing it.
        arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
        cachedelta = self.rev(p1), deltatext
        text = util.buffer(arraytext)
    else:
        # The first parent manifest isn't already loaded, so we'll
        # just encode a fulltext of the manifest and pass that
        # through to the revlog layer, and let it handle the delta
        # process.
        text = m.text()
        arraytext = array.array('c', text)
        cachedelta = None

    n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
    # Cache the parsed manifest and its raw text so a subsequent add()
    # on top of this revision can take the fastdelta path above.
    self._mancache[n] = (m, arraytext)

    return n
General Comments 0
You need to be logged in to leave comments. Login now