##// END OF EJS Templates
localrepo.clone: add a way to override server preferuncompressed...
Siddharth Agarwal -
r23546:deabbe7e default
parent child Browse files
Show More
@@ -1,1823 +1,1823 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 propertycache = util.propertycache
21 propertycache = util.propertycache
22 filecache = scmutil.filecache
22 filecache = scmutil.filecache
23
23
class repofilecache(filecache):
    """A filecache that always operates on the unfiltered repository.

    All filecache usage on repo objects is for logic that must be
    unfiltered, so every descriptor access is redirected through
    repo.unfiltered().
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)

    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)

    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
34
34
class storecache(repofilecache):
    """A repofilecache for files that live in the store directory."""

    def join(self, obj, fname):
        # resolve fname relative to the store (.hg/store) instead of .hg/
        return obj.sjoin(fname)
39
39
class unfilteredpropertycache(propertycache):
    """A propertycache that is only ever computed on the unfiltered repo.

    When accessed through a filtered view, the value is delegated to the
    unfiltered repository's attribute instead of being cached on the view.
    """

    def __get__(self, repo, type=None):
        unfiltered = repo.unfiltered()
        if unfiltered is repo:
            # genuine unfiltered repo: normal propertycache behaviour
            return super(unfilteredpropertycache, self).__get__(unfiltered)
        # filtered view: read (and trigger caching) on the unfiltered repo
        return getattr(unfiltered, self.name)
48
48
class filteredpropertycache(propertycache):
    """A propertycache whose value must respect repo filtering.

    The cached value is stored directly on the (possibly filtered) object
    via object.__setattr__, bypassing any descriptor machinery.
    """

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)
54
54
55
55
def hasunfilteredcache(repo, name):
    """Report whether an unfilteredpropertycache value exists for <name>.

    The check is a plain instance-dict membership test on the unfiltered
    repository, so it never triggers the (possibly expensive) computation.
    """
    return name in repo.unfiltered().__dict__
59
59
def unfilteredmethod(orig):
    """Decorator: always run *orig* against the unfiltered repository."""
    def inner(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
65
65
# wire-protocol capabilities implemented by a modern local peer
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# legacy peers additionally support the old changegroupsubset call
legacycaps = moderncaps.union(['changegroupsubset'])
69
69
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose the 'served' filtered view, as a real server would
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When a bundle2 is requested, getbundle returns a raw stream to
            # keep the wire-level code happy; rebuild a proper unbundler
            # object for in-process (local peer) use.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # ret is a bundle20 object: turn it back into an unbundler.
                # This little dance should be dropped once the API improves.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced as exc:
            # surface push races to the caller as a response error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
154
154
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # same as localpeer but advertises the legacy capability set
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
173
173
174 class localrepository(object):
174 class localrepository(object):
175
175
176 supportedformats = set(('revlogv1', 'generaldelta'))
176 supportedformats = set(('revlogv1', 'generaldelta'))
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 'dotencode'))
178 'dotencode'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
180 requirements = ['revlogv1']
180 requirements = ['revlogv1']
181 filtername = None
181 filtername = None
182
182
183 # a list of (ui, featureset) functions.
183 # a list of (ui, featureset) functions.
184 # only functions defined in module of enabled extensions are invoked
184 # only functions defined in module of enabled extensions are invoked
185 featuresetupfuncs = set()
185 featuresetupfuncs = set()
186
186
187 def _baserequirements(self, create):
187 def _baserequirements(self, create):
188 return self.requirements[:]
188 return self.requirements[:]
189
189
190 def __init__(self, baseui, path=None, create=False):
190 def __init__(self, baseui, path=None, create=False):
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wopener = self.wvfs
192 self.wopener = self.wvfs
193 self.root = self.wvfs.base
193 self.root = self.wvfs.base
194 self.path = self.wvfs.join(".hg")
194 self.path = self.wvfs.join(".hg")
195 self.origroot = path
195 self.origroot = path
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.vfs = scmutil.vfs(self.path)
197 self.vfs = scmutil.vfs(self.path)
198 self.opener = self.vfs
198 self.opener = self.vfs
199 self.baseui = baseui
199 self.baseui = baseui
200 self.ui = baseui.copy()
200 self.ui = baseui.copy()
201 self.ui.copy = baseui.copy # prevent copying repo configuration
201 self.ui.copy = baseui.copy # prevent copying repo configuration
202 # A list of callback to shape the phase if no data were found.
202 # A list of callback to shape the phase if no data were found.
203 # Callback are in the form: func(repo, roots) --> processed root.
203 # Callback are in the form: func(repo, roots) --> processed root.
204 # This list it to be filled by extension during repo setup
204 # This list it to be filled by extension during repo setup
205 self._phasedefaults = []
205 self._phasedefaults = []
206 try:
206 try:
207 self.ui.readconfig(self.join("hgrc"), self.root)
207 self.ui.readconfig(self.join("hgrc"), self.root)
208 extensions.loadall(self.ui)
208 extensions.loadall(self.ui)
209 except IOError:
209 except IOError:
210 pass
210 pass
211
211
212 if self.featuresetupfuncs:
212 if self.featuresetupfuncs:
213 self.supported = set(self._basesupported) # use private copy
213 self.supported = set(self._basesupported) # use private copy
214 extmods = set(m.__name__ for n, m
214 extmods = set(m.__name__ for n, m
215 in extensions.extensions(self.ui))
215 in extensions.extensions(self.ui))
216 for setupfunc in self.featuresetupfuncs:
216 for setupfunc in self.featuresetupfuncs:
217 if setupfunc.__module__ in extmods:
217 if setupfunc.__module__ in extmods:
218 setupfunc(self.ui, self.supported)
218 setupfunc(self.ui, self.supported)
219 else:
219 else:
220 self.supported = self._basesupported
220 self.supported = self._basesupported
221
221
222 if not self.vfs.isdir():
222 if not self.vfs.isdir():
223 if create:
223 if create:
224 if not self.wvfs.exists():
224 if not self.wvfs.exists():
225 self.wvfs.makedirs()
225 self.wvfs.makedirs()
226 self.vfs.makedir(notindexed=True)
226 self.vfs.makedir(notindexed=True)
227 requirements = self._baserequirements(create)
227 requirements = self._baserequirements(create)
228 if self.ui.configbool('format', 'usestore', True):
228 if self.ui.configbool('format', 'usestore', True):
229 self.vfs.mkdir("store")
229 self.vfs.mkdir("store")
230 requirements.append("store")
230 requirements.append("store")
231 if self.ui.configbool('format', 'usefncache', True):
231 if self.ui.configbool('format', 'usefncache', True):
232 requirements.append("fncache")
232 requirements.append("fncache")
233 if self.ui.configbool('format', 'dotencode', True):
233 if self.ui.configbool('format', 'dotencode', True):
234 requirements.append('dotencode')
234 requirements.append('dotencode')
235 # create an invalid changelog
235 # create an invalid changelog
236 self.vfs.append(
236 self.vfs.append(
237 "00changelog.i",
237 "00changelog.i",
238 '\0\0\0\2' # represents revlogv2
238 '\0\0\0\2' # represents revlogv2
239 ' dummy changelog to prevent using the old repo layout'
239 ' dummy changelog to prevent using the old repo layout'
240 )
240 )
241 if self.ui.configbool('format', 'generaldelta', False):
241 if self.ui.configbool('format', 'generaldelta', False):
242 requirements.append("generaldelta")
242 requirements.append("generaldelta")
243 requirements = set(requirements)
243 requirements = set(requirements)
244 else:
244 else:
245 raise error.RepoError(_("repository %s not found") % path)
245 raise error.RepoError(_("repository %s not found") % path)
246 elif create:
246 elif create:
247 raise error.RepoError(_("repository %s already exists") % path)
247 raise error.RepoError(_("repository %s already exists") % path)
248 else:
248 else:
249 try:
249 try:
250 requirements = scmutil.readrequires(self.vfs, self.supported)
250 requirements = scmutil.readrequires(self.vfs, self.supported)
251 except IOError, inst:
251 except IOError, inst:
252 if inst.errno != errno.ENOENT:
252 if inst.errno != errno.ENOENT:
253 raise
253 raise
254 requirements = set()
254 requirements = set()
255
255
256 self.sharedpath = self.path
256 self.sharedpath = self.path
257 try:
257 try:
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 realpath=True)
259 realpath=True)
260 s = vfs.base
260 s = vfs.base
261 if not vfs.exists():
261 if not vfs.exists():
262 raise error.RepoError(
262 raise error.RepoError(
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 self.sharedpath = s
264 self.sharedpath = s
265 except IOError, inst:
265 except IOError, inst:
266 if inst.errno != errno.ENOENT:
266 if inst.errno != errno.ENOENT:
267 raise
267 raise
268
268
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 self.spath = self.store.path
270 self.spath = self.store.path
271 self.svfs = self.store.vfs
271 self.svfs = self.store.vfs
272 self.sopener = self.svfs
272 self.sopener = self.svfs
273 self.sjoin = self.store.join
273 self.sjoin = self.store.join
274 self.vfs.createmode = self.store.createmode
274 self.vfs.createmode = self.store.createmode
275 self._applyrequirements(requirements)
275 self._applyrequirements(requirements)
276 if create:
276 if create:
277 self._writerequirements()
277 self._writerequirements()
278
278
279
279
280 self._branchcaches = {}
280 self._branchcaches = {}
281 self.filterpats = {}
281 self.filterpats = {}
282 self._datafilters = {}
282 self._datafilters = {}
283 self._transref = self._lockref = self._wlockref = None
283 self._transref = self._lockref = self._wlockref = None
284
284
285 # A cache for various files under .hg/ that tracks file changes,
285 # A cache for various files under .hg/ that tracks file changes,
286 # (used by the filecache decorator)
286 # (used by the filecache decorator)
287 #
287 #
288 # Maps a property name to its util.filecacheentry
288 # Maps a property name to its util.filecacheentry
289 self._filecache = {}
289 self._filecache = {}
290
290
291 # hold sets of revision to be filtered
291 # hold sets of revision to be filtered
292 # should be cleared when something might have changed the filter value:
292 # should be cleared when something might have changed the filter value:
293 # - new changesets,
293 # - new changesets,
294 # - phase change,
294 # - phase change,
295 # - new obsolescence marker,
295 # - new obsolescence marker,
296 # - working directory parent change,
296 # - working directory parent change,
297 # - bookmark changes
297 # - bookmark changes
298 self.filteredrevcache = {}
298 self.filteredrevcache = {}
299
299
300 def close(self):
300 def close(self):
301 pass
301 pass
302
302
303 def _restrictcapabilities(self, caps):
303 def _restrictcapabilities(self, caps):
304 # bundle2 is not ready for prime time, drop it unless explicitly
304 # bundle2 is not ready for prime time, drop it unless explicitly
305 # required by the tests (or some brave tester)
305 # required by the tests (or some brave tester)
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 caps = set(caps)
307 caps = set(caps)
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 return caps
310 return caps
311
311
312 def _applyrequirements(self, requirements):
312 def _applyrequirements(self, requirements):
313 self.requirements = requirements
313 self.requirements = requirements
314 self.sopener.options = dict((r, 1) for r in requirements
314 self.sopener.options = dict((r, 1) for r in requirements
315 if r in self.openerreqs)
315 if r in self.openerreqs)
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 if chunkcachesize is not None:
317 if chunkcachesize is not None:
318 self.sopener.options['chunkcachesize'] = chunkcachesize
318 self.sopener.options['chunkcachesize'] = chunkcachesize
319 maxchainlen = self.ui.configint('format', 'maxchainlen')
319 maxchainlen = self.ui.configint('format', 'maxchainlen')
320 if maxchainlen is not None:
320 if maxchainlen is not None:
321 self.sopener.options['maxchainlen'] = maxchainlen
321 self.sopener.options['maxchainlen'] = maxchainlen
322
322
323 def _writerequirements(self):
323 def _writerequirements(self):
324 reqfile = self.opener("requires", "w")
324 reqfile = self.opener("requires", "w")
325 for r in sorted(self.requirements):
325 for r in sorted(self.requirements):
326 reqfile.write("%s\n" % r)
326 reqfile.write("%s\n" % r)
327 reqfile.close()
327 reqfile.close()
328
328
329 def _checknested(self, path):
329 def _checknested(self, path):
330 """Determine if path is a legal nested repository."""
330 """Determine if path is a legal nested repository."""
331 if not path.startswith(self.root):
331 if not path.startswith(self.root):
332 return False
332 return False
333 subpath = path[len(self.root) + 1:]
333 subpath = path[len(self.root) + 1:]
334 normsubpath = util.pconvert(subpath)
334 normsubpath = util.pconvert(subpath)
335
335
336 # XXX: Checking against the current working copy is wrong in
336 # XXX: Checking against the current working copy is wrong in
337 # the sense that it can reject things like
337 # the sense that it can reject things like
338 #
338 #
339 # $ hg cat -r 10 sub/x.txt
339 # $ hg cat -r 10 sub/x.txt
340 #
340 #
341 # if sub/ is no longer a subrepository in the working copy
341 # if sub/ is no longer a subrepository in the working copy
342 # parent revision.
342 # parent revision.
343 #
343 #
344 # However, it can of course also allow things that would have
344 # However, it can of course also allow things that would have
345 # been rejected before, such as the above cat command if sub/
345 # been rejected before, such as the above cat command if sub/
346 # is a subrepository now, but was a normal directory before.
346 # is a subrepository now, but was a normal directory before.
347 # The old path auditor would have rejected by mistake since it
347 # The old path auditor would have rejected by mistake since it
348 # panics when it sees sub/.hg/.
348 # panics when it sees sub/.hg/.
349 #
349 #
350 # All in all, checking against the working copy seems sensible
350 # All in all, checking against the working copy seems sensible
351 # since we want to prevent access to nested repositories on
351 # since we want to prevent access to nested repositories on
352 # the filesystem *now*.
352 # the filesystem *now*.
353 ctx = self[None]
353 ctx = self[None]
354 parts = util.splitpath(subpath)
354 parts = util.splitpath(subpath)
355 while parts:
355 while parts:
356 prefix = '/'.join(parts)
356 prefix = '/'.join(parts)
357 if prefix in ctx.substate:
357 if prefix in ctx.substate:
358 if prefix == normsubpath:
358 if prefix == normsubpath:
359 return True
359 return True
360 else:
360 else:
361 sub = ctx.sub(prefix)
361 sub = ctx.sub(prefix)
362 return sub.checknested(subpath[len(prefix) + 1:])
362 return sub.checknested(subpath[len(prefix) + 1:])
363 else:
363 else:
364 parts.pop()
364 parts.pop()
365 return False
365 return False
366
366
367 def peer(self):
367 def peer(self):
368 return localpeer(self) # not cached to avoid reference cycle
368 return localpeer(self) # not cached to avoid reference cycle
369
369
370 def unfiltered(self):
370 def unfiltered(self):
371 """Return unfiltered version of the repository
371 """Return unfiltered version of the repository
372
372
373 Intended to be overwritten by filtered repo."""
373 Intended to be overwritten by filtered repo."""
374 return self
374 return self
375
375
376 def filtered(self, name):
376 def filtered(self, name):
377 """Return a filtered version of a repository"""
377 """Return a filtered version of a repository"""
378 # build a new class with the mixin and the current class
378 # build a new class with the mixin and the current class
379 # (possibly subclass of the repo)
379 # (possibly subclass of the repo)
380 class proxycls(repoview.repoview, self.unfiltered().__class__):
380 class proxycls(repoview.repoview, self.unfiltered().__class__):
381 pass
381 pass
382 return proxycls(self, name)
382 return proxycls(self, name)
383
383
384 @repofilecache('bookmarks')
384 @repofilecache('bookmarks')
385 def _bookmarks(self):
385 def _bookmarks(self):
386 return bookmarks.bmstore(self)
386 return bookmarks.bmstore(self)
387
387
388 @repofilecache('bookmarks.current')
388 @repofilecache('bookmarks.current')
389 def _bookmarkcurrent(self):
389 def _bookmarkcurrent(self):
390 return bookmarks.readcurrent(self)
390 return bookmarks.readcurrent(self)
391
391
392 def bookmarkheads(self, bookmark):
392 def bookmarkheads(self, bookmark):
393 name = bookmark.split('@', 1)[0]
393 name = bookmark.split('@', 1)[0]
394 heads = []
394 heads = []
395 for mark, n in self._bookmarks.iteritems():
395 for mark, n in self._bookmarks.iteritems():
396 if mark.split('@', 1)[0] == name:
396 if mark.split('@', 1)[0] == name:
397 heads.append(n)
397 heads.append(n)
398 return heads
398 return heads
399
399
400 @storecache('phaseroots')
400 @storecache('phaseroots')
401 def _phasecache(self):
401 def _phasecache(self):
402 return phases.phasecache(self, self._phasedefaults)
402 return phases.phasecache(self, self._phasedefaults)
403
403
404 @storecache('obsstore')
404 @storecache('obsstore')
405 def obsstore(self):
405 def obsstore(self):
406 # read default format for new obsstore.
406 # read default format for new obsstore.
407 defaultformat = self.ui.configint('format', 'obsstore-version', None)
407 defaultformat = self.ui.configint('format', 'obsstore-version', None)
408 # rely on obsstore class default when possible.
408 # rely on obsstore class default when possible.
409 kwargs = {}
409 kwargs = {}
410 if defaultformat is not None:
410 if defaultformat is not None:
411 kwargs['defaultformat'] = defaultformat
411 kwargs['defaultformat'] = defaultformat
412 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
412 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
413 store = obsolete.obsstore(self.sopener, readonly=readonly,
413 store = obsolete.obsstore(self.sopener, readonly=readonly,
414 **kwargs)
414 **kwargs)
415 if store and readonly:
415 if store and readonly:
416 # message is rare enough to not be translated
416 # message is rare enough to not be translated
417 msg = 'obsolete feature not enabled but %i markers found!\n'
417 msg = 'obsolete feature not enabled but %i markers found!\n'
418 self.ui.warn(msg % len(list(store)))
418 self.ui.warn(msg % len(list(store)))
419 return store
419 return store
420
420
421 @storecache('00changelog.i')
421 @storecache('00changelog.i')
422 def changelog(self):
422 def changelog(self):
423 c = changelog.changelog(self.sopener)
423 c = changelog.changelog(self.sopener)
424 if 'HG_PENDING' in os.environ:
424 if 'HG_PENDING' in os.environ:
425 p = os.environ['HG_PENDING']
425 p = os.environ['HG_PENDING']
426 if p.startswith(self.root):
426 if p.startswith(self.root):
427 c.readpending('00changelog.i.a')
427 c.readpending('00changelog.i.a')
428 return c
428 return c
429
429
430 @storecache('00manifest.i')
430 @storecache('00manifest.i')
431 def manifest(self):
431 def manifest(self):
432 return manifest.manifest(self.sopener)
432 return manifest.manifest(self.sopener)
433
433
434 @repofilecache('dirstate')
434 @repofilecache('dirstate')
435 def dirstate(self):
435 def dirstate(self):
436 warned = [0]
436 warned = [0]
437 def validate(node):
437 def validate(node):
438 try:
438 try:
439 self.changelog.rev(node)
439 self.changelog.rev(node)
440 return node
440 return node
441 except error.LookupError:
441 except error.LookupError:
442 if not warned[0]:
442 if not warned[0]:
443 warned[0] = True
443 warned[0] = True
444 self.ui.warn(_("warning: ignoring unknown"
444 self.ui.warn(_("warning: ignoring unknown"
445 " working parent %s!\n") % short(node))
445 " working parent %s!\n") % short(node))
446 return nullid
446 return nullid
447
447
448 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
448 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
449
449
450 def __getitem__(self, changeid):
450 def __getitem__(self, changeid):
451 if changeid is None:
451 if changeid is None:
452 return context.workingctx(self)
452 return context.workingctx(self)
453 return context.changectx(self, changeid)
453 return context.changectx(self, changeid)
454
454
455 def __contains__(self, changeid):
455 def __contains__(self, changeid):
456 try:
456 try:
457 return bool(self.lookup(changeid))
457 return bool(self.lookup(changeid))
458 except error.RepoLookupError:
458 except error.RepoLookupError:
459 return False
459 return False
460
460
461 def __nonzero__(self):
461 def __nonzero__(self):
462 return True
462 return True
463
463
464 def __len__(self):
464 def __len__(self):
465 return len(self.changelog)
465 return len(self.changelog)
466
466
467 def __iter__(self):
467 def __iter__(self):
468 return iter(self.changelog)
468 return iter(self.changelog)
469
469
470 def revs(self, expr, *args):
470 def revs(self, expr, *args):
471 '''Return a list of revisions matching the given revset'''
471 '''Return a list of revisions matching the given revset'''
472 expr = revset.formatspec(expr, *args)
472 expr = revset.formatspec(expr, *args)
473 m = revset.match(None, expr)
473 m = revset.match(None, expr)
474 return m(self, revset.spanset(self))
474 return m(self, revset.spanset(self))
475
475
476 def set(self, expr, *args):
476 def set(self, expr, *args):
477 '''
477 '''
478 Yield a context for each matching revision, after doing arg
478 Yield a context for each matching revision, after doing arg
479 replacement via revset.formatspec
479 replacement via revset.formatspec
480 '''
480 '''
481 for r in self.revs(expr, *args):
481 for r in self.revs(expr, *args):
482 yield self[r]
482 yield self[r]
483
483
484 def url(self):
484 def url(self):
485 return 'file:' + self.root
485 return 'file:' + self.root
486
486
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    This a convenience method to aid invoking hooks. Extensions likely
    won't call this unless they have registered a custom hook or are
    replacing code that is expected to call a hook.
    """
    return hook.hook(self.ui, self, name, throw, **args)
495
495
496 @unfilteredmethod
496 @unfilteredmethod
497 def _tag(self, names, node, message, local, user, date, extra={},
497 def _tag(self, names, node, message, local, user, date, extra={},
498 editor=False):
498 editor=False):
499 if isinstance(names, str):
499 if isinstance(names, str):
500 names = (names,)
500 names = (names,)
501
501
502 branches = self.branchmap()
502 branches = self.branchmap()
503 for name in names:
503 for name in names:
504 self.hook('pretag', throw=True, node=hex(node), tag=name,
504 self.hook('pretag', throw=True, node=hex(node), tag=name,
505 local=local)
505 local=local)
506 if name in branches:
506 if name in branches:
507 self.ui.warn(_("warning: tag %s conflicts with existing"
507 self.ui.warn(_("warning: tag %s conflicts with existing"
508 " branch name\n") % name)
508 " branch name\n") % name)
509
509
510 def writetags(fp, names, munge, prevtags):
510 def writetags(fp, names, munge, prevtags):
511 fp.seek(0, 2)
511 fp.seek(0, 2)
512 if prevtags and prevtags[-1] != '\n':
512 if prevtags and prevtags[-1] != '\n':
513 fp.write('\n')
513 fp.write('\n')
514 for name in names:
514 for name in names:
515 m = munge and munge(name) or name
515 m = munge and munge(name) or name
516 if (self._tagscache.tagtypes and
516 if (self._tagscache.tagtypes and
517 name in self._tagscache.tagtypes):
517 name in self._tagscache.tagtypes):
518 old = self.tags().get(name, nullid)
518 old = self.tags().get(name, nullid)
519 fp.write('%s %s\n' % (hex(old), m))
519 fp.write('%s %s\n' % (hex(old), m))
520 fp.write('%s %s\n' % (hex(node), m))
520 fp.write('%s %s\n' % (hex(node), m))
521 fp.close()
521 fp.close()
522
522
523 prevtags = ''
523 prevtags = ''
524 if local:
524 if local:
525 try:
525 try:
526 fp = self.opener('localtags', 'r+')
526 fp = self.opener('localtags', 'r+')
527 except IOError:
527 except IOError:
528 fp = self.opener('localtags', 'a')
528 fp = self.opener('localtags', 'a')
529 else:
529 else:
530 prevtags = fp.read()
530 prevtags = fp.read()
531
531
532 # local tags are stored in the current charset
532 # local tags are stored in the current charset
533 writetags(fp, names, None, prevtags)
533 writetags(fp, names, None, prevtags)
534 for name in names:
534 for name in names:
535 self.hook('tag', node=hex(node), tag=name, local=local)
535 self.hook('tag', node=hex(node), tag=name, local=local)
536 return
536 return
537
537
538 try:
538 try:
539 fp = self.wfile('.hgtags', 'rb+')
539 fp = self.wfile('.hgtags', 'rb+')
540 except IOError, e:
540 except IOError, e:
541 if e.errno != errno.ENOENT:
541 if e.errno != errno.ENOENT:
542 raise
542 raise
543 fp = self.wfile('.hgtags', 'ab')
543 fp = self.wfile('.hgtags', 'ab')
544 else:
544 else:
545 prevtags = fp.read()
545 prevtags = fp.read()
546
546
547 # committed tags are stored in UTF-8
547 # committed tags are stored in UTF-8
548 writetags(fp, names, encoding.fromlocal, prevtags)
548 writetags(fp, names, encoding.fromlocal, prevtags)
549
549
550 fp.close()
550 fp.close()
551
551
552 self.invalidatecaches()
552 self.invalidatecaches()
553
553
554 if '.hgtags' not in self.dirstate:
554 if '.hgtags' not in self.dirstate:
555 self[None].add(['.hgtags'])
555 self[None].add(['.hgtags'])
556
556
557 m = matchmod.exact(self.root, '', ['.hgtags'])
557 m = matchmod.exact(self.root, '', ['.hgtags'])
558 tagnode = self.commit(message, user, date, extra=extra, match=m,
558 tagnode = self.commit(message, user, date, extra=extra, match=m,
559 editor=editor)
559 editor=editor)
560
560
561 for name in names:
561 for name in names:
562 self.hook('tag', node=hex(node), tag=name, local=local)
562 self.hook('tag', node=hex(node), tag=name, local=local)
563
563
564 return tagnode
564 return tagnode
565
565
def tag(self, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # refuse to run while .hgtags has uncommitted modifications:
        # the tag commit below would silently sweep them up
        hgtagsmatch = matchmod.exact(self.root, '', ['.hgtags'])
        if util.any(self.status(match=hgtagsmatch, unknown=True,
                                ignored=True)):
            raise util.Abort(_('working copy of .hgtags is changed'),
                             hint=_('please commit .hgtags manually'))

    self.tags() # instantiate the cache
    self._tag(names, node, message, local, user, date, editor=editor)
595
595
@filteredpropertycache
def _tagscache(self):
    '''Returns a tagscache object that contains various tags related
    caches.'''

    # One decorated property holds every tag-related cache; the other
    # tag methods simply fetch (and lazily fill) fields on it.
    class tagscache(object):
        def __init__(self):
            # tags maps tag name to node; tagtypes maps tag name to
            # 'global' or 'local'.  (Global tags are defined by .hgtags
            # across all heads, local tags in .hg/localtags.)  Together
            # they form the in-memory cache of tags.
            self.tags = self.tagtypes = None
            # populated on demand by tagslist() and nodetags()
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()
    return cache
618
618
def tags(self):
    '''return a mapping of tag to node'''
    t = {}
    if self.changelog.filteredrevs:
        # the tag cache is built unfiltered, so recompute when some
        # revisions are hidden; only the tags dict is needed here
        # (fix: the second element was bound to an unused local)
        tags = self._findtags()[0]
    else:
        tags = self._tagscache.tags
    for k, v in tags.iteritems():
        try:
            # ignore tags to unknown nodes
            self.changelog.rev(v)
            t[k] = v
        except (error.LookupError, ValueError):
            pass
    return t
634
634
def _findtags(self):
    '''Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?

    alltags = {} # map tag name to (node, hist)
    tagtypes = {}

    tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for name, (node, hist) in alltags.iteritems():
        if node != nullid:
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict((encoding.tolocal(name), value)
                    for name, value in tagtypes.iteritems())
    return (tags, tagtypes)
667
667
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    '''
    tagtypes = self._tagscache.tagtypes
    return tagtypes.get(tagname)
678
678
def tagslist(self):
    '''return a list of tags ordered by revision'''
    cache = self._tagscache
    if not cache.tagslist:
        byrev = []
        for tagname, tagnode in self.tags().iteritems():
            byrev.append((self.changelog.rev(tagnode), tagname, tagnode))
        # sort by revision, then drop the revision component
        cache.tagslist = [(name, node) for rev, name, node in sorted(byrev)]
    return cache.tagslist
688
688
def nodetags(self, node):
    '''return the tags associated with a node'''
    cache = self._tagscache
    if not cache.nodetagscache:
        # invert the tag->node map into node->sorted tag list
        bynode = {}
        for tagname, tagnode in cache.tags.iteritems():
            bynode.setdefault(tagnode, []).append(tagname)
        for taglist in bynode.itervalues():
            taglist.sort()
        cache.nodetagscache = bynode
    return cache.nodetagscache.get(node, [])
699
699
def nodebookmarks(self, node):
    """Return the sorted list of bookmark names pointing at *node*."""
    marks = [bookmark for bookmark, bnode in self._bookmarks.iteritems()
             if bnode == node]
    return sorted(marks)
706
706
def branchmap(self):
    '''returns a dictionary {branch: [branchheads]} with branchheads
    ordered by increasing revision number'''
    # refresh the per-filter cache, then return our filter's entry
    branchmap.updatecache(self)
    return self._branchcaches[self.filtername]
712
712
def branchtip(self, branch):
    '''return the tip node for a given branch'''
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        # translate the cache's KeyError into a repo-level error
        raise error.RepoLookupError(_("unknown branch '%s'") % branch)
719
719
def lookup(self, key):
    """Resolve *key* to the binary node of the matching changeset."""
    return self[key].node()
722
722
def lookupbranch(self, key, remote=None):
    """Resolve *key* to a branch name, consulting *remote* when given."""
    repo = remote or self
    if key in repo.branchmap():
        return key

    # not a branch name: resolve key as a revision and use its branch,
    # preferring the remote only when it is a local repository
    if remote and remote.local():
        repo = remote
    else:
        repo = self
    return repo[key].branch()
730
730
def known(self, nodes):
    """For each node, report True when it is known and not secret."""
    nodemap = self.changelog.nodemap
    phasecache = self._phasecache
    result = []
    for node in nodes:
        rev = nodemap.get(node)
        # unknown nodes and secret changesets are both reported False
        ok = rev is not None and phasecache.phase(self, rev) < phases.secret
        result.append(ok)
    return result
740
740
def local(self):
    """Return self: this repository is local (see peer.local())."""
    return self
743
743
def cancopy(self):
    """True when this repository may be cloned by copying its files."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    # non-publishing repositories can always be copied
    if not self.ui.configbool('phases', 'publish', True):
        return True
    # if publishing we can't copy if there is filtered content
    return not self.filtered('visible').changelog.filteredrevs
752
752
def join(self, f, *insidef):
    """Join *f* (and any further components) onto the .hg directory path."""
    return os.path.join(self.path, f, *insidef)
755
755
def wjoin(self, f, *insidef):
    """Join *f* (and any further components) onto the working dir root."""
    return os.path.join(self.root, f, *insidef)
758
758
def file(self, f):
    """Return the filelog tracking file *f*.

    A single leading '/' is stripped so '/foo' and 'foo' address the
    same filelog.
    """
    # startswith is safe on the empty string, unlike f[0] which
    # raised IndexError for f == ''
    if f.startswith('/'):
        f = f[1:]
    return filelog.filelog(self.sopener, f)
763
763
def changectx(self, changeid):
    """Return the change context for *changeid* (alias for self[changeid])."""
    return self[changeid]
766
766
def parents(self, changeid=None):
    '''get list of changectxs for parents of changeid'''
    return self[changeid].parents()
770
770
def setparents(self, p1, p2=nullid):
    """Set working directory parents, fixing up dirstate copy records."""
    self.dirstate.beginparentchange()
    copies = self.dirstate.setparents(p1, p2)
    pctx = self[p1]
    if copies:
        # Adjust copy records, the dirstate cannot do it, it
        # requires access to parents manifests. Preserve them
        # only for entries added to first parent.
        for dst in copies:
            if dst not in pctx and copies[dst] in pctx:
                self.dirstate.copy(copies[dst], dst)
        if p2 == nullid:
            # drop copy records whose endpoints are both unknown to p1
            for dst, src in sorted(self.dirstate.copies().items()):
                if dst not in pctx and src not in pctx:
                    self.dirstate.copy(None, dst)
    self.dirstate.endparentchange()
787
787
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
792
792
def getcwd(self):
    """Return the current working directory as seen by the dirstate."""
    return self.dirstate.getcwd()
795
795
def pathto(self, f, cwd=None):
    """Return *f* expressed relative to *cwd* (dirstate semantics)."""
    return self.dirstate.pathto(f, cwd)
798
798
def wfile(self, f, mode='r'):
    """Open file *f* in the working directory with the given mode."""
    return self.wopener(f, mode)
801
801
def _link(self, f):
    # true when f is a symlink in the working directory
    return self.wvfs.islink(f)
804
804
def _loadfilter(self, filter):
    """Parse and cache the (matcher, fn, params) list for config
    section *filter* (e.g. 'encode' or 'decode').

    Results are memoized in self.filterpats.
    """
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments.
            # NOTE(fix): bind the wrapped function via a default
            # argument; the previous 'oldfn = fn' closure captured
            # the variable by reference, so a later loop iteration
            # rebinding it made earlier wrappers call the wrong
            # function (late-binding closure bug).
            if not inspect.getargspec(fn)[2]:
                fn = lambda s, c, oldfn=fn, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l
    return self.filterpats[filter]
828
828
def _filter(self, filterpats, filename, data):
    """Run *data* through the first filter whose pattern matches."""
    for mf, fn, cmd in filterpats:
        if not mf(filename):
            continue
        self.ui.debug("filtering %s through %s\n" % (filename, cmd))
        data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
        break
    return data
837
837
@unfilteredpropertycache
def _encodefilterpats(self):
    # cached [encode] section filters, applied on read from the wdir
    return self._loadfilter('encode')
841
841
@unfilteredpropertycache
def _decodefilterpats(self):
    # cached [decode] section filters, applied on write to the wdir
    return self._loadfilter('decode')
845
845
def adddatafilter(self, name, filter):
    """Register *filter* under *name* for encode/decode commands."""
    self._datafilters[name] = filter
848
848
def wread(self, filename):
    """Read *filename* from the working dir, applying encode filters.

    A symlink is read as its target path."""
    if self._link(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wopener.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
855
855
def wwrite(self, filename, data, flags):
    """Write *data* to the working dir, applying decode filters.

    flags: 'l' creates a symlink; 'x' marks a regular file executable."""
    data = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        self.wopener.symlink(data, filename)
    else:
        self.wopener.write(filename, data)
        if 'x' in flags:
            self.wvfs.setflags(filename, False, True)
864
864
def wwritedata(self, filename, data):
    """Return *data* passed through the decode filters for *filename*."""
    return self._filter(self._decodefilterpats, filename, data)
867
867
def currenttransaction(self):
    """return the current transaction or None if non exists"""
    # _transref is a weakref (or None); replace the old
    # 'x and x() or None' and-or idiom with explicit steps
    if not self._transref:
        return None
    tr = self._transref()
    if tr and tr.running():
        return tr
    return None
874
874
def transaction(self, desc, report=None):
    """Open a transaction, or nest into an already-running one.

    desc is recorded in the journal; report (default ui.warn) is the
    callable used to print messages on rollback.
    """
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    # 'report and report or ...' simplified to the equivalent 'or'
    rp = report or self.ui.warn
    vfsmap = {'plain': self.opener} # root of .hg/
    tr = transaction.transaction(rp, self.sopener, vfsmap,
                                 "journal",
                                 aftertrans(renames),
                                 self.store.createmode)
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    self._transref = weakref.ref(tr)
    return tr
900
900
def _journalfiles(self):
    """Return (vfs, name) pairs for every file a transaction journals."""
    return ((self.svfs, 'journal'),
            (self.vfs, 'journal.dirstate'),
            (self.vfs, 'journal.branch'),
            (self.vfs, 'journal.desc'),
            (self.vfs, 'journal.bookmarks'),
            (self.svfs, 'journal.phaseroots'))
908
908
def undofiles(self):
    """Return (vfs, name) pairs for the undo.* counterparts."""
    return [(vfs, undoname(name)) for vfs, name in self._journalfiles()]
911
911
def _writejournal(self, desc):
    """Snapshot mutable state into journal.* files for rollback."""
    snapshots = [
        (self.opener, "journal.dirstate",
         self.opener.tryread("dirstate")),
        (self.opener, "journal.branch",
         encoding.fromlocal(self.dirstate.branch())),
        (self.opener, "journal.desc",
         "%d\n%s\n" % (len(self), desc)),
        (self.opener, "journal.bookmarks",
         self.opener.tryread("bookmarks")),
        (self.sopener, "journal.phaseroots",
         self.sopener.tryread("phaseroots")),
    ]
    for vfs, name, data in snapshots:
        vfs.write(name, data)
923
923
def recover(self):
    """Roll back an interrupted transaction; True if one was found."""
    lock = self.lock()
    try:
        # guard clause: nothing to do without a leftover journal
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        vfsmap = {'': self.sopener,
                  'plain': self.opener,}
        transaction.rollback(self.sopener, vfsmap, "journal",
                             self.ui.warn)
        self.invalidate()
        return True
    finally:
        lock.release()
940
940
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction; 0 on success, 1 when nothing to undo."""
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        # guard clause: bail out when no undo data exists
        if not self.svfs.exists("undo"):
            self.ui.warn(_("no rollback information available\n"))
            return 1
        return self._rollback(dryrun, force)
    finally:
        release(lock, wlock)
953
953
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force):
    """Implementation of rollback(); wlock and lock must be held."""
    ui = self.ui
    try:
        # undo.desc: "<old repo length>\n<command>\n[<detail>\n]"
        args = self.opener.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        msg = _('rolling back unknown transaction\n')
        desc = None

    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise util.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.opener}
    transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks')
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots')
    self.invalidate()

    # restore the dirstate only when a working-dir parent was stripped
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        self.vfs.rename('undo.dirstate', 'dirstate')
        try:
            branch = self.opener.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1020
1020
1021 def invalidatecaches(self):
1021 def invalidatecaches(self):
1022
1022
1023 if '_tagscache' in vars(self):
1023 if '_tagscache' in vars(self):
1024 # can't use delattr on proxy
1024 # can't use delattr on proxy
1025 del self.__dict__['_tagscache']
1025 del self.__dict__['_tagscache']
1026
1026
1027 self.unfiltered()._branchcaches.clear()
1027 self.unfiltered()._branchcaches.clear()
1028 self.invalidatevolatilesets()
1028 self.invalidatevolatilesets()
1029
1029
1030 def invalidatevolatilesets(self):
1030 def invalidatevolatilesets(self):
1031 self.filteredrevcache.clear()
1031 self.filteredrevcache.clear()
1032 obsolete.clearobscaches(self)
1032 obsolete.clearobscaches(self)
1033
1033
1034 def invalidatedirstate(self):
1034 def invalidatedirstate(self):
1035 '''Invalidates the dirstate, causing the next call to dirstate
1035 '''Invalidates the dirstate, causing the next call to dirstate
1036 to check if it was modified since the last time it was read,
1036 to check if it was modified since the last time it was read,
1037 rereading it if it has.
1037 rereading it if it has.
1038
1038
1039 This is different to dirstate.invalidate() that it doesn't always
1039 This is different to dirstate.invalidate() that it doesn't always
1040 rereads the dirstate. Use dirstate.invalidate() if you want to
1040 rereads the dirstate. Use dirstate.invalidate() if you want to
1041 explicitly read the dirstate again (i.e. restoring it to a previous
1041 explicitly read the dirstate again (i.e. restoring it to a previous
1042 known good state).'''
1042 known good state).'''
1043 if hasunfilteredcache(self, 'dirstate'):
1043 if hasunfilteredcache(self, 'dirstate'):
1044 for k in self.dirstate._filecache:
1044 for k in self.dirstate._filecache:
1045 try:
1045 try:
1046 delattr(self.dirstate, k)
1046 delattr(self.dirstate, k)
1047 except AttributeError:
1047 except AttributeError:
1048 pass
1048 pass
1049 delattr(self.unfiltered(), 'dirstate')
1049 delattr(self.unfiltered(), 'dirstate')
1050
1050
1051 def invalidate(self):
1051 def invalidate(self):
1052 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1052 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1053 for k in self._filecache:
1053 for k in self._filecache:
1054 # dirstate is invalidated separately in invalidatedirstate()
1054 # dirstate is invalidated separately in invalidatedirstate()
1055 if k == 'dirstate':
1055 if k == 'dirstate':
1056 continue
1056 continue
1057
1057
1058 try:
1058 try:
1059 delattr(unfiltered, k)
1059 delattr(unfiltered, k)
1060 except AttributeError:
1060 except AttributeError:
1061 pass
1061 pass
1062 self.invalidatecaches()
1062 self.invalidatecaches()
1063 self.store.invalidatecaches()
1063 self.store.invalidatecaches()
1064
1064
1065 def invalidateall(self):
1065 def invalidateall(self):
1066 '''Fully invalidates both store and non-store parts, causing the
1066 '''Fully invalidates both store and non-store parts, causing the
1067 subsequent operation to reread any outside changes.'''
1067 subsequent operation to reread any outside changes.'''
1068 # extension should hook this to invalidate its caches
1068 # extension should hook this to invalidate its caches
1069 self.invalidate()
1069 self.invalidate()
1070 self.invalidatedirstate()
1070 self.invalidatedirstate()
1071
1071
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        '''Acquire *lockname* inside *vfs* and return the lock object.

        A non-blocking acquisition is attempted first. If the lock is
        held elsewhere and *wait* is false, the LockHeld error
        propagates to the caller; otherwise a warning naming the
        current holder is printed and the acquisition is retried with
        the configured ui.timeout.

        *releasefn* is invoked when the lock is released; *acquirefn*
        (if not None) is called right after a successful acquisition.
        *desc* is a human-readable description used in messages.'''
        try:
            # timeout 0: fail immediately if someone else holds the lock
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1088
1088
1089 def _afterlock(self, callback):
1089 def _afterlock(self, callback):
1090 """add a callback to the current repository lock.
1090 """add a callback to the current repository lock.
1091
1091
1092 The callback will be executed on lock release."""
1092 The callback will be executed on lock release."""
1093 l = self._lockref and self._lockref()
1093 l = self._lockref and self._lockref()
1094 if l:
1094 if l:
1095 l.postrelease.append(callback)
1095 l.postrelease.append(callback)
1096 else:
1096 else:
1097 callback()
1097 callback()
1098
1098
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If this process already holds the store lock, its acquisition
        count is bumped and the same lock object is returned. On final
        release, every loaded file cache entry except the dirstate
        (which is guarded by wlock instead) is refreshed so the caches
        notice what the locked section wrote.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant acquisition: bump the existing lock's counter
            l.lock()
            return l

        def unlock():
            # runs on final release: re-stat cached store files so the
            # caches pick up the changes written while locked
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1118
1118
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.

        If this process already holds the working-directory lock, its
        acquisition count is bumped and the same lock object is
        returned. On final release the dirstate is written out (or
        invalidated, if a parent change is still pending) and its file
        cache entry refreshed.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquisition: bump the existing lock's counter
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                # a parent change is in flight: discard the in-memory
                # dirstate rather than writing a half-updated one
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1141
1141
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """commit an individual file as part of a larger transaction

        fctx       - the filecontext to commit (None is handled by callers)
        manifest1  - manifest of the first parent
        manifest2  - manifest of the second parent (empty if not a merge)
        linkrev    - changelog revision this filelog entry will link to
        tr         - the current transaction (a weak proxy)
        changelist - list of changed files; the filename is appended to it
                     when a new filelog revision or a flags change is
                     recorded

        Returns the filelog node to store in the new manifest: a newly
        added node if the file changed, otherwise the existing first
        parent node.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        # missing entries default to nullid ("not present in this parent")
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                # record the copy source in the filelog metadata; the
                # first parent becomes nullid per the scheme above
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # file only exists in the second parent: make it the first
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1222
1222
1223 @unfilteredmethod
1223 @unfilteredmethod
1224 def commit(self, text="", user=None, date=None, match=None, force=False,
1224 def commit(self, text="", user=None, date=None, match=None, force=False,
1225 editor=False, extra={}):
1225 editor=False, extra={}):
1226 """Add a new revision to current repository.
1226 """Add a new revision to current repository.
1227
1227
1228 Revision information is gathered from the working directory,
1228 Revision information is gathered from the working directory,
1229 match can be used to filter the committed files. If editor is
1229 match can be used to filter the committed files. If editor is
1230 supplied, it is called to get a commit message.
1230 supplied, it is called to get a commit message.
1231 """
1231 """
1232
1232
1233 def fail(f, msg):
1233 def fail(f, msg):
1234 raise util.Abort('%s: %s' % (f, msg))
1234 raise util.Abort('%s: %s' % (f, msg))
1235
1235
1236 if not match:
1236 if not match:
1237 match = matchmod.always(self.root, '')
1237 match = matchmod.always(self.root, '')
1238
1238
1239 if not force:
1239 if not force:
1240 vdirs = []
1240 vdirs = []
1241 match.explicitdir = vdirs.append
1241 match.explicitdir = vdirs.append
1242 match.bad = fail
1242 match.bad = fail
1243
1243
1244 wlock = self.wlock()
1244 wlock = self.wlock()
1245 try:
1245 try:
1246 wctx = self[None]
1246 wctx = self[None]
1247 merge = len(wctx.parents()) > 1
1247 merge = len(wctx.parents()) > 1
1248
1248
1249 if (not force and merge and match and
1249 if (not force and merge and match and
1250 (match.files() or match.anypats())):
1250 (match.files() or match.anypats())):
1251 raise util.Abort(_('cannot partially commit a merge '
1251 raise util.Abort(_('cannot partially commit a merge '
1252 '(do not specify files or patterns)'))
1252 '(do not specify files or patterns)'))
1253
1253
1254 status = self.status(match=match, clean=force)
1254 status = self.status(match=match, clean=force)
1255 if force:
1255 if force:
1256 status.modified.extend(status.clean) # mq may commit clean files
1256 status.modified.extend(status.clean) # mq may commit clean files
1257
1257
1258 # check subrepos
1258 # check subrepos
1259 subs = []
1259 subs = []
1260 commitsubs = set()
1260 commitsubs = set()
1261 newstate = wctx.substate.copy()
1261 newstate = wctx.substate.copy()
1262 # only manage subrepos and .hgsubstate if .hgsub is present
1262 # only manage subrepos and .hgsubstate if .hgsub is present
1263 if '.hgsub' in wctx:
1263 if '.hgsub' in wctx:
1264 # we'll decide whether to track this ourselves, thanks
1264 # we'll decide whether to track this ourselves, thanks
1265 for c in status.modified, status.added, status.removed:
1265 for c in status.modified, status.added, status.removed:
1266 if '.hgsubstate' in c:
1266 if '.hgsubstate' in c:
1267 c.remove('.hgsubstate')
1267 c.remove('.hgsubstate')
1268
1268
1269 # compare current state to last committed state
1269 # compare current state to last committed state
1270 # build new substate based on last committed state
1270 # build new substate based on last committed state
1271 oldstate = wctx.p1().substate
1271 oldstate = wctx.p1().substate
1272 for s in sorted(newstate.keys()):
1272 for s in sorted(newstate.keys()):
1273 if not match(s):
1273 if not match(s):
1274 # ignore working copy, use old state if present
1274 # ignore working copy, use old state if present
1275 if s in oldstate:
1275 if s in oldstate:
1276 newstate[s] = oldstate[s]
1276 newstate[s] = oldstate[s]
1277 continue
1277 continue
1278 if not force:
1278 if not force:
1279 raise util.Abort(
1279 raise util.Abort(
1280 _("commit with new subrepo %s excluded") % s)
1280 _("commit with new subrepo %s excluded") % s)
1281 if wctx.sub(s).dirty(True):
1281 if wctx.sub(s).dirty(True):
1282 if not self.ui.configbool('ui', 'commitsubrepos'):
1282 if not self.ui.configbool('ui', 'commitsubrepos'):
1283 raise util.Abort(
1283 raise util.Abort(
1284 _("uncommitted changes in subrepo %s") % s,
1284 _("uncommitted changes in subrepo %s") % s,
1285 hint=_("use --subrepos for recursive commit"))
1285 hint=_("use --subrepos for recursive commit"))
1286 subs.append(s)
1286 subs.append(s)
1287 commitsubs.add(s)
1287 commitsubs.add(s)
1288 else:
1288 else:
1289 bs = wctx.sub(s).basestate()
1289 bs = wctx.sub(s).basestate()
1290 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1290 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1291 if oldstate.get(s, (None, None, None))[1] != bs:
1291 if oldstate.get(s, (None, None, None))[1] != bs:
1292 subs.append(s)
1292 subs.append(s)
1293
1293
1294 # check for removed subrepos
1294 # check for removed subrepos
1295 for p in wctx.parents():
1295 for p in wctx.parents():
1296 r = [s for s in p.substate if s not in newstate]
1296 r = [s for s in p.substate if s not in newstate]
1297 subs += [s for s in r if match(s)]
1297 subs += [s for s in r if match(s)]
1298 if subs:
1298 if subs:
1299 if (not match('.hgsub') and
1299 if (not match('.hgsub') and
1300 '.hgsub' in (wctx.modified() + wctx.added())):
1300 '.hgsub' in (wctx.modified() + wctx.added())):
1301 raise util.Abort(
1301 raise util.Abort(
1302 _("can't commit subrepos without .hgsub"))
1302 _("can't commit subrepos without .hgsub"))
1303 status.modified.insert(0, '.hgsubstate')
1303 status.modified.insert(0, '.hgsubstate')
1304
1304
1305 elif '.hgsub' in status.removed:
1305 elif '.hgsub' in status.removed:
1306 # clean up .hgsubstate when .hgsub is removed
1306 # clean up .hgsubstate when .hgsub is removed
1307 if ('.hgsubstate' in wctx and
1307 if ('.hgsubstate' in wctx and
1308 '.hgsubstate' not in (status.modified + status.added +
1308 '.hgsubstate' not in (status.modified + status.added +
1309 status.removed)):
1309 status.removed)):
1310 status.removed.insert(0, '.hgsubstate')
1310 status.removed.insert(0, '.hgsubstate')
1311
1311
1312 # make sure all explicit patterns are matched
1312 # make sure all explicit patterns are matched
1313 if not force and match.files():
1313 if not force and match.files():
1314 matched = set(status.modified + status.added + status.removed)
1314 matched = set(status.modified + status.added + status.removed)
1315
1315
1316 for f in match.files():
1316 for f in match.files():
1317 f = self.dirstate.normalize(f)
1317 f = self.dirstate.normalize(f)
1318 if f == '.' or f in matched or f in wctx.substate:
1318 if f == '.' or f in matched or f in wctx.substate:
1319 continue
1319 continue
1320 if f in status.deleted:
1320 if f in status.deleted:
1321 fail(f, _('file not found!'))
1321 fail(f, _('file not found!'))
1322 if f in vdirs: # visited directory
1322 if f in vdirs: # visited directory
1323 d = f + '/'
1323 d = f + '/'
1324 for mf in matched:
1324 for mf in matched:
1325 if mf.startswith(d):
1325 if mf.startswith(d):
1326 break
1326 break
1327 else:
1327 else:
1328 fail(f, _("no match under directory!"))
1328 fail(f, _("no match under directory!"))
1329 elif f not in self.dirstate:
1329 elif f not in self.dirstate:
1330 fail(f, _("file not tracked!"))
1330 fail(f, _("file not tracked!"))
1331
1331
1332 cctx = context.workingctx(self, text, user, date, extra, status)
1332 cctx = context.workingctx(self, text, user, date, extra, status)
1333
1333
1334 if (not force and not extra.get("close") and not merge
1334 if (not force and not extra.get("close") and not merge
1335 and not cctx.files()
1335 and not cctx.files()
1336 and wctx.branch() == wctx.p1().branch()):
1336 and wctx.branch() == wctx.p1().branch()):
1337 return None
1337 return None
1338
1338
1339 if merge and cctx.deleted():
1339 if merge and cctx.deleted():
1340 raise util.Abort(_("cannot commit merge with missing files"))
1340 raise util.Abort(_("cannot commit merge with missing files"))
1341
1341
1342 ms = mergemod.mergestate(self)
1342 ms = mergemod.mergestate(self)
1343 for f in status.modified:
1343 for f in status.modified:
1344 if f in ms and ms[f] == 'u':
1344 if f in ms and ms[f] == 'u':
1345 raise util.Abort(_("unresolved merge conflicts "
1345 raise util.Abort(_("unresolved merge conflicts "
1346 "(see hg help resolve)"))
1346 "(see hg help resolve)"))
1347
1347
1348 if editor:
1348 if editor:
1349 cctx._text = editor(self, cctx, subs)
1349 cctx._text = editor(self, cctx, subs)
1350 edited = (text != cctx._text)
1350 edited = (text != cctx._text)
1351
1351
1352 # Save commit message in case this transaction gets rolled back
1352 # Save commit message in case this transaction gets rolled back
1353 # (e.g. by a pretxncommit hook). Leave the content alone on
1353 # (e.g. by a pretxncommit hook). Leave the content alone on
1354 # the assumption that the user will use the same editor again.
1354 # the assumption that the user will use the same editor again.
1355 msgfn = self.savecommitmessage(cctx._text)
1355 msgfn = self.savecommitmessage(cctx._text)
1356
1356
1357 # commit subs and write new state
1357 # commit subs and write new state
1358 if subs:
1358 if subs:
1359 for s in sorted(commitsubs):
1359 for s in sorted(commitsubs):
1360 sub = wctx.sub(s)
1360 sub = wctx.sub(s)
1361 self.ui.status(_('committing subrepository %s\n') %
1361 self.ui.status(_('committing subrepository %s\n') %
1362 subrepo.subrelpath(sub))
1362 subrepo.subrelpath(sub))
1363 sr = sub.commit(cctx._text, user, date)
1363 sr = sub.commit(cctx._text, user, date)
1364 newstate[s] = (newstate[s][0], sr)
1364 newstate[s] = (newstate[s][0], sr)
1365 subrepo.writestate(self, newstate)
1365 subrepo.writestate(self, newstate)
1366
1366
1367 p1, p2 = self.dirstate.parents()
1367 p1, p2 = self.dirstate.parents()
1368 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1368 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1369 try:
1369 try:
1370 self.hook("precommit", throw=True, parent1=hookp1,
1370 self.hook("precommit", throw=True, parent1=hookp1,
1371 parent2=hookp2)
1371 parent2=hookp2)
1372 ret = self.commitctx(cctx, True)
1372 ret = self.commitctx(cctx, True)
1373 except: # re-raises
1373 except: # re-raises
1374 if edited:
1374 if edited:
1375 self.ui.write(
1375 self.ui.write(
1376 _('note: commit message saved in %s\n') % msgfn)
1376 _('note: commit message saved in %s\n') % msgfn)
1377 raise
1377 raise
1378
1378
1379 # update bookmarks, dirstate and mergestate
1379 # update bookmarks, dirstate and mergestate
1380 bookmarks.update(self, [p1, p2], ret)
1380 bookmarks.update(self, [p1, p2], ret)
1381 cctx.markcommitted(ret)
1381 cctx.markcommitted(ret)
1382 ms.reset()
1382 ms.reset()
1383 finally:
1383 finally:
1384 wlock.release()
1384 wlock.release()
1385
1385
1386 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1386 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1387 # hack for command that use a temporary commit (eg: histedit)
1387 # hack for command that use a temporary commit (eg: histedit)
1388 # temporary commit got stripped before hook release
1388 # temporary commit got stripped before hook release
1389 if node in self:
1389 if node in self:
1390 self.hook("commit", node=node, parent1=parent1,
1390 self.hook("commit", node=node, parent1=parent1,
1391 parent2=parent2)
1391 parent2=parent2)
1392 self._afterlock(commithook)
1392 self._afterlock(commithook)
1393 return ret
1393 return ret
1394
1394
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error=True (used when committing the result of a failed
        update/merge), IOErrors other than ENOENT on individual files
        are fatal instead of being treated as removals.

        Returns the node of the new changeset. The whole operation runs
        under the store lock inside a single transaction; on any
        failure the transaction is released (rolled back).
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a weak proxy so filelogs/manifest don't keep the
            # transaction alive past its release
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)  # rev the new changeset will get
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest: only record removals of files that
                # actually existed in a parent
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lazily flush pending changelog data so hooks can see it
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1478
1478
1479 @unfilteredmethod
1479 @unfilteredmethod
1480 def destroying(self):
1480 def destroying(self):
1481 '''Inform the repository that nodes are about to be destroyed.
1481 '''Inform the repository that nodes are about to be destroyed.
1482 Intended for use by strip and rollback, so there's a common
1482 Intended for use by strip and rollback, so there's a common
1483 place for anything that has to be done before destroying history.
1483 place for anything that has to be done before destroying history.
1484
1484
1485 This is mostly useful for saving state that is in memory and waiting
1485 This is mostly useful for saving state that is in memory and waiting
1486 to be flushed when the current lock is released. Because a call to
1486 to be flushed when the current lock is released. Because a call to
1487 destroyed is imminent, the repo will be invalidated causing those
1487 destroyed is imminent, the repo will be invalidated causing those
1488 changes to stay in memory (waiting for the next unlock), or vanish
1488 changes to stay in memory (waiting for the next unlock), or vanish
1489 completely.
1489 completely.
1490 '''
1490 '''
1491 # When using the same lock to commit and strip, the phasecache is left
1491 # When using the same lock to commit and strip, the phasecache is left
1492 # dirty after committing. Then when we strip, the repo is invalidated,
1492 # dirty after committing. Then when we strip, the repo is invalidated,
1493 # causing those changes to disappear.
1493 # causing those changes to disappear.
1494 if '_phasecache' in vars(self):
1494 if '_phasecache' in vars(self):
1495 self._phasecache.write()
1495 self._phasecache.write()
1496
1496
1497 @unfilteredmethod
1497 @unfilteredmethod
1498 def destroyed(self):
1498 def destroyed(self):
1499 '''Inform the repository that nodes have been destroyed.
1499 '''Inform the repository that nodes have been destroyed.
1500 Intended for use by strip and rollback, so there's a common
1500 Intended for use by strip and rollback, so there's a common
1501 place for anything that has to be done after destroying history.
1501 place for anything that has to be done after destroying history.
1502 '''
1502 '''
1503 # When one tries to:
1503 # When one tries to:
1504 # 1) destroy nodes thus calling this method (e.g. strip)
1504 # 1) destroy nodes thus calling this method (e.g. strip)
1505 # 2) use phasecache somewhere (e.g. commit)
1505 # 2) use phasecache somewhere (e.g. commit)
1506 #
1506 #
1507 # then 2) will fail because the phasecache contains nodes that were
1507 # then 2) will fail because the phasecache contains nodes that were
1508 # removed. We can either remove phasecache from the filecache,
1508 # removed. We can either remove phasecache from the filecache,
1509 # causing it to reload next time it is accessed, or simply filter
1509 # causing it to reload next time it is accessed, or simply filter
1510 # the removed nodes now and write the updated cache.
1510 # the removed nodes now and write the updated cache.
1511 self._phasecache.filterunknown(self)
1511 self._phasecache.filterunknown(self)
1512 self._phasecache.write()
1512 self._phasecache.write()
1513
1513
1514 # update the 'served' branch cache to help read only server process
1514 # update the 'served' branch cache to help read only server process
1515 # Thanks to branchcache collaboration this is done from the nearest
1515 # Thanks to branchcache collaboration this is done from the nearest
1516 # filtered subset and it is expected to be fast.
1516 # filtered subset and it is expected to be fast.
1517 branchmap.updatecache(self.filtered('served'))
1517 branchmap.updatecache(self.filtered('served'))
1518
1518
1519 # Ensure the persistent tag cache is updated. Doing it now
1519 # Ensure the persistent tag cache is updated. Doing it now
1520 # means that the tag cache only has to worry about destroyed
1520 # means that the tag cache only has to worry about destroyed
1521 # heads immediately after a strip/rollback. That in turn
1521 # heads immediately after a strip/rollback. That in turn
1522 # guarantees that "cachetip == currenttip" (comparing both rev
1522 # guarantees that "cachetip == currenttip" (comparing both rev
1523 # and node) always means no nodes have been added or destroyed.
1523 # and node) always means no nodes have been added or destroyed.
1524
1524
1525 # XXX this is suboptimal when qrefresh'ing: we strip the current
1525 # XXX this is suboptimal when qrefresh'ing: we strip the current
1526 # head, refresh the tag cache, then immediately add a new head.
1526 # head, refresh the tag cache, then immediately add a new head.
1527 # But I think doing it this way is necessary for the "instant
1527 # But I think doing it this way is necessary for the "instant
1528 # tag cache retrieval" case to work.
1528 # tag cache retrieval" case to work.
1529 self.invalidate()
1529 self.invalidate()
1530
1530
1531 def walk(self, match, node=None):
1531 def walk(self, match, node=None):
1532 '''
1532 '''
1533 walk recursively through the directory tree or a given
1533 walk recursively through the directory tree or a given
1534 changeset, finding all files matched by the match
1534 changeset, finding all files matched by the match
1535 function
1535 function
1536 '''
1536 '''
1537 return self[node].walk(match)
1537 return self[node].walk(match)
1538
1538
1539 def status(self, node1='.', node2=None, match=None,
1539 def status(self, node1='.', node2=None, match=None,
1540 ignored=False, clean=False, unknown=False,
1540 ignored=False, clean=False, unknown=False,
1541 listsubrepos=False):
1541 listsubrepos=False):
1542 '''a convenience method that calls node1.status(node2)'''
1542 '''a convenience method that calls node1.status(node2)'''
1543 return self[node1].status(node2, match, ignored, clean, unknown,
1543 return self[node1].status(node2, match, ignored, clean, unknown,
1544 listsubrepos)
1544 listsubrepos)
1545
1545
1546 def heads(self, start=None):
1546 def heads(self, start=None):
1547 heads = self.changelog.heads(start)
1547 heads = self.changelog.heads(start)
1548 # sort the output in rev descending order
1548 # sort the output in rev descending order
1549 return sorted(heads, key=self.changelog.rev, reverse=True)
1549 return sorted(heads, key=self.changelog.rev, reverse=True)
1550
1550
1551 def branchheads(self, branch=None, start=None, closed=False):
1551 def branchheads(self, branch=None, start=None, closed=False):
1552 '''return a (possibly filtered) list of heads for the given branch
1552 '''return a (possibly filtered) list of heads for the given branch
1553
1553
1554 Heads are returned in topological order, from newest to oldest.
1554 Heads are returned in topological order, from newest to oldest.
1555 If branch is None, use the dirstate branch.
1555 If branch is None, use the dirstate branch.
1556 If start is not None, return only heads reachable from start.
1556 If start is not None, return only heads reachable from start.
1557 If closed is True, return heads that are marked as closed as well.
1557 If closed is True, return heads that are marked as closed as well.
1558 '''
1558 '''
1559 if branch is None:
1559 if branch is None:
1560 branch = self[None].branch()
1560 branch = self[None].branch()
1561 branches = self.branchmap()
1561 branches = self.branchmap()
1562 if branch not in branches:
1562 if branch not in branches:
1563 return []
1563 return []
1564 # the cache returns heads ordered lowest to highest
1564 # the cache returns heads ordered lowest to highest
1565 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1565 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1566 if start is not None:
1566 if start is not None:
1567 # filter out the heads that cannot be reached from startrev
1567 # filter out the heads that cannot be reached from startrev
1568 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1568 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1569 bheads = [h for h in bheads if h in fbheads]
1569 bheads = [h for h in bheads if h in fbheads]
1570 return bheads
1570 return bheads
1571
1571
1572 def branches(self, nodes):
1572 def branches(self, nodes):
1573 if not nodes:
1573 if not nodes:
1574 nodes = [self.changelog.tip()]
1574 nodes = [self.changelog.tip()]
1575 b = []
1575 b = []
1576 for n in nodes:
1576 for n in nodes:
1577 t = n
1577 t = n
1578 while True:
1578 while True:
1579 p = self.changelog.parents(n)
1579 p = self.changelog.parents(n)
1580 if p[1] != nullid or p[0] == nullid:
1580 if p[1] != nullid or p[0] == nullid:
1581 b.append((t, n, p[0], p[1]))
1581 b.append((t, n, p[0], p[1]))
1582 break
1582 break
1583 n = p[0]
1583 n = p[0]
1584 return b
1584 return b
1585
1585
1586 def between(self, pairs):
1586 def between(self, pairs):
1587 r = []
1587 r = []
1588
1588
1589 for top, bottom in pairs:
1589 for top, bottom in pairs:
1590 n, l, i = top, [], 0
1590 n, l, i = top, [], 0
1591 f = 1
1591 f = 1
1592
1592
1593 while n != bottom and n != nullid:
1593 while n != bottom and n != nullid:
1594 p = self.changelog.parents(n)[0]
1594 p = self.changelog.parents(n)[0]
1595 if i == f:
1595 if i == f:
1596 l.append(n)
1596 l.append(n)
1597 f = f * 2
1597 f = f * 2
1598 n = p
1598 n = p
1599 i += 1
1599 i += 1
1600
1600
1601 r.append(l)
1601 r.append(l)
1602
1602
1603 return r
1603 return r
1604
1604
1605 def checkpush(self, pushop):
1605 def checkpush(self, pushop):
1606 """Extensions can override this function if additional checks have
1606 """Extensions can override this function if additional checks have
1607 to be performed before pushing, or call it if they override push
1607 to be performed before pushing, or call it if they override push
1608 command.
1608 command.
1609 """
1609 """
1610 pass
1610 pass
1611
1611
1612 @unfilteredpropertycache
1612 @unfilteredpropertycache
1613 def prepushoutgoinghooks(self):
1613 def prepushoutgoinghooks(self):
1614 """Return util.hooks consists of "(repo, remote, outgoing)"
1614 """Return util.hooks consists of "(repo, remote, outgoing)"
1615 functions, which are called before pushing changesets.
1615 functions, which are called before pushing changesets.
1616 """
1616 """
1617 return util.hooks()
1617 return util.hooks()
1618
1618
1619 def stream_in(self, remote, requirements):
1619 def stream_in(self, remote, requirements):
1620 lock = self.lock()
1620 lock = self.lock()
1621 try:
1621 try:
1622 # Save remote branchmap. We will use it later
1622 # Save remote branchmap. We will use it later
1623 # to speed up branchcache creation
1623 # to speed up branchcache creation
1624 rbranchmap = None
1624 rbranchmap = None
1625 if remote.capable("branchmap"):
1625 if remote.capable("branchmap"):
1626 rbranchmap = remote.branchmap()
1626 rbranchmap = remote.branchmap()
1627
1627
1628 fp = remote.stream_out()
1628 fp = remote.stream_out()
1629 l = fp.readline()
1629 l = fp.readline()
1630 try:
1630 try:
1631 resp = int(l)
1631 resp = int(l)
1632 except ValueError:
1632 except ValueError:
1633 raise error.ResponseError(
1633 raise error.ResponseError(
1634 _('unexpected response from remote server:'), l)
1634 _('unexpected response from remote server:'), l)
1635 if resp == 1:
1635 if resp == 1:
1636 raise util.Abort(_('operation forbidden by server'))
1636 raise util.Abort(_('operation forbidden by server'))
1637 elif resp == 2:
1637 elif resp == 2:
1638 raise util.Abort(_('locking the remote repository failed'))
1638 raise util.Abort(_('locking the remote repository failed'))
1639 elif resp != 0:
1639 elif resp != 0:
1640 raise util.Abort(_('the server sent an unknown error code'))
1640 raise util.Abort(_('the server sent an unknown error code'))
1641 self.ui.status(_('streaming all changes\n'))
1641 self.ui.status(_('streaming all changes\n'))
1642 l = fp.readline()
1642 l = fp.readline()
1643 try:
1643 try:
1644 total_files, total_bytes = map(int, l.split(' ', 1))
1644 total_files, total_bytes = map(int, l.split(' ', 1))
1645 except (ValueError, TypeError):
1645 except (ValueError, TypeError):
1646 raise error.ResponseError(
1646 raise error.ResponseError(
1647 _('unexpected response from remote server:'), l)
1647 _('unexpected response from remote server:'), l)
1648 self.ui.status(_('%d files to transfer, %s of data\n') %
1648 self.ui.status(_('%d files to transfer, %s of data\n') %
1649 (total_files, util.bytecount(total_bytes)))
1649 (total_files, util.bytecount(total_bytes)))
1650 handled_bytes = 0
1650 handled_bytes = 0
1651 self.ui.progress(_('clone'), 0, total=total_bytes)
1651 self.ui.progress(_('clone'), 0, total=total_bytes)
1652 start = time.time()
1652 start = time.time()
1653
1653
1654 tr = self.transaction(_('clone'))
1654 tr = self.transaction(_('clone'))
1655 try:
1655 try:
1656 for i in xrange(total_files):
1656 for i in xrange(total_files):
1657 # XXX doesn't support '\n' or '\r' in filenames
1657 # XXX doesn't support '\n' or '\r' in filenames
1658 l = fp.readline()
1658 l = fp.readline()
1659 try:
1659 try:
1660 name, size = l.split('\0', 1)
1660 name, size = l.split('\0', 1)
1661 size = int(size)
1661 size = int(size)
1662 except (ValueError, TypeError):
1662 except (ValueError, TypeError):
1663 raise error.ResponseError(
1663 raise error.ResponseError(
1664 _('unexpected response from remote server:'), l)
1664 _('unexpected response from remote server:'), l)
1665 if self.ui.debugflag:
1665 if self.ui.debugflag:
1666 self.ui.debug('adding %s (%s)\n' %
1666 self.ui.debug('adding %s (%s)\n' %
1667 (name, util.bytecount(size)))
1667 (name, util.bytecount(size)))
1668 # for backwards compat, name was partially encoded
1668 # for backwards compat, name was partially encoded
1669 ofp = self.sopener(store.decodedir(name), 'w')
1669 ofp = self.sopener(store.decodedir(name), 'w')
1670 for chunk in util.filechunkiter(fp, limit=size):
1670 for chunk in util.filechunkiter(fp, limit=size):
1671 handled_bytes += len(chunk)
1671 handled_bytes += len(chunk)
1672 self.ui.progress(_('clone'), handled_bytes,
1672 self.ui.progress(_('clone'), handled_bytes,
1673 total=total_bytes)
1673 total=total_bytes)
1674 ofp.write(chunk)
1674 ofp.write(chunk)
1675 ofp.close()
1675 ofp.close()
1676 tr.close()
1676 tr.close()
1677 finally:
1677 finally:
1678 tr.release()
1678 tr.release()
1679
1679
1680 # Writing straight to files circumvented the inmemory caches
1680 # Writing straight to files circumvented the inmemory caches
1681 self.invalidate()
1681 self.invalidate()
1682
1682
1683 elapsed = time.time() - start
1683 elapsed = time.time() - start
1684 if elapsed <= 0:
1684 if elapsed <= 0:
1685 elapsed = 0.001
1685 elapsed = 0.001
1686 self.ui.progress(_('clone'), None)
1686 self.ui.progress(_('clone'), None)
1687 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1687 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1688 (util.bytecount(total_bytes), elapsed,
1688 (util.bytecount(total_bytes), elapsed,
1689 util.bytecount(total_bytes / elapsed)))
1689 util.bytecount(total_bytes / elapsed)))
1690
1690
1691 # new requirements = old non-format requirements +
1691 # new requirements = old non-format requirements +
1692 # new format-related
1692 # new format-related
1693 # requirements from the streamed-in repository
1693 # requirements from the streamed-in repository
1694 requirements.update(set(self.requirements) - self.supportedformats)
1694 requirements.update(set(self.requirements) - self.supportedformats)
1695 self._applyrequirements(requirements)
1695 self._applyrequirements(requirements)
1696 self._writerequirements()
1696 self._writerequirements()
1697
1697
1698 if rbranchmap:
1698 if rbranchmap:
1699 rbheads = []
1699 rbheads = []
1700 closed = []
1700 closed = []
1701 for bheads in rbranchmap.itervalues():
1701 for bheads in rbranchmap.itervalues():
1702 rbheads.extend(bheads)
1702 rbheads.extend(bheads)
1703 for h in bheads:
1703 for h in bheads:
1704 r = self.changelog.rev(h)
1704 r = self.changelog.rev(h)
1705 b, c = self.changelog.branchinfo(r)
1705 b, c = self.changelog.branchinfo(r)
1706 if c:
1706 if c:
1707 closed.append(h)
1707 closed.append(h)
1708
1708
1709 if rbheads:
1709 if rbheads:
1710 rtiprev = max((int(self.changelog.rev(node))
1710 rtiprev = max((int(self.changelog.rev(node))
1711 for node in rbheads))
1711 for node in rbheads))
1712 cache = branchmap.branchcache(rbranchmap,
1712 cache = branchmap.branchcache(rbranchmap,
1713 self[rtiprev].node(),
1713 self[rtiprev].node(),
1714 rtiprev,
1714 rtiprev,
1715 closednodes=closed)
1715 closednodes=closed)
1716 # Try to stick it as low as possible
1716 # Try to stick it as low as possible
1717 # filter above served are unlikely to be fetch from a clone
1717 # filter above served are unlikely to be fetch from a clone
1718 for candidate in ('base', 'immutable', 'served'):
1718 for candidate in ('base', 'immutable', 'served'):
1719 rview = self.filtered(candidate)
1719 rview = self.filtered(candidate)
1720 if cache.validfor(rview):
1720 if cache.validfor(rview):
1721 self._branchcaches[candidate] = cache
1721 self._branchcaches[candidate] = cache
1722 cache.write(rview)
1722 cache.write(rview)
1723 break
1723 break
1724 self.invalidate()
1724 self.invalidate()
1725 return len(self.heads()) + 1
1725 return len(self.heads()) + 1
1726 finally:
1726 finally:
1727 lock.release()
1727 lock.release()
1728
1728
1729 def clone(self, remote, heads=[], stream=False):
1729 def clone(self, remote, heads=[], stream=None):
1730 '''clone remote repository.
1730 '''clone remote repository.
1731
1731
1732 keyword arguments:
1732 keyword arguments:
1733 heads: list of revs to clone (forces use of pull)
1733 heads: list of revs to clone (forces use of pull)
1734 stream: use streaming clone if possible'''
1734 stream: use streaming clone if possible'''
1735
1735
1736 # now, all clients that can request uncompressed clones can
1736 # now, all clients that can request uncompressed clones can
1737 # read repo formats supported by all servers that can serve
1737 # read repo formats supported by all servers that can serve
1738 # them.
1738 # them.
1739
1739
1740 # if revlog format changes, client will have to check version
1740 # if revlog format changes, client will have to check version
1741 # and format flags on "stream" capability, and use
1741 # and format flags on "stream" capability, and use
1742 # uncompressed only if compatible.
1742 # uncompressed only if compatible.
1743
1743
1744 if not stream:
1744 if stream is None:
1745 # if the server explicitly prefers to stream (for fast LANs)
1745 # if the server explicitly prefers to stream (for fast LANs)
1746 stream = remote.capable('stream-preferred')
1746 stream = remote.capable('stream-preferred')
1747
1747
1748 if stream and not heads:
1748 if stream and not heads:
1749 # 'stream' means remote revlog format is revlogv1 only
1749 # 'stream' means remote revlog format is revlogv1 only
1750 if remote.capable('stream'):
1750 if remote.capable('stream'):
1751 self.stream_in(remote, set(('revlogv1',)))
1751 self.stream_in(remote, set(('revlogv1',)))
1752 else:
1752 else:
1753 # otherwise, 'streamreqs' contains the remote revlog format
1753 # otherwise, 'streamreqs' contains the remote revlog format
1754 streamreqs = remote.capable('streamreqs')
1754 streamreqs = remote.capable('streamreqs')
1755 if streamreqs:
1755 if streamreqs:
1756 streamreqs = set(streamreqs.split(','))
1756 streamreqs = set(streamreqs.split(','))
1757 # if we support it, stream in and adjust our requirements
1757 # if we support it, stream in and adjust our requirements
1758 if not streamreqs - self.supportedformats:
1758 if not streamreqs - self.supportedformats:
1759 self.stream_in(remote, streamreqs)
1759 self.stream_in(remote, streamreqs)
1760
1760
1761 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1761 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1762 try:
1762 try:
1763 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1763 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1764 ret = exchange.pull(self, remote, heads).cgresult
1764 ret = exchange.pull(self, remote, heads).cgresult
1765 finally:
1765 finally:
1766 self.ui.restoreconfig(quiet)
1766 self.ui.restoreconfig(quiet)
1767 return ret
1767 return ret
1768
1768
1769 def pushkey(self, namespace, key, old, new):
1769 def pushkey(self, namespace, key, old, new):
1770 try:
1770 try:
1771 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1771 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1772 old=old, new=new)
1772 old=old, new=new)
1773 except error.HookAbort, exc:
1773 except error.HookAbort, exc:
1774 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1774 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1775 if exc.hint:
1775 if exc.hint:
1776 self.ui.write_err(_("(%s)\n") % exc.hint)
1776 self.ui.write_err(_("(%s)\n") % exc.hint)
1777 return False
1777 return False
1778 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1778 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1779 ret = pushkey.push(self, namespace, key, old, new)
1779 ret = pushkey.push(self, namespace, key, old, new)
1780 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1780 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1781 ret=ret)
1781 ret=ret)
1782 return ret
1782 return ret
1783
1783
1784 def listkeys(self, namespace):
1784 def listkeys(self, namespace):
1785 self.hook('prelistkeys', throw=True, namespace=namespace)
1785 self.hook('prelistkeys', throw=True, namespace=namespace)
1786 self.ui.debug('listing keys for "%s"\n' % namespace)
1786 self.ui.debug('listing keys for "%s"\n' % namespace)
1787 values = pushkey.list(self, namespace)
1787 values = pushkey.list(self, namespace)
1788 self.hook('listkeys', namespace=namespace, values=values)
1788 self.hook('listkeys', namespace=namespace, values=values)
1789 return values
1789 return values
1790
1790
1791 def debugwireargs(self, one, two, three=None, four=None, five=None):
1791 def debugwireargs(self, one, two, three=None, four=None, five=None):
1792 '''used to test argument passing over the wire'''
1792 '''used to test argument passing over the wire'''
1793 return "%s %s %s %s %s" % (one, two, three, four, five)
1793 return "%s %s %s %s %s" % (one, two, three, four, five)
1794
1794
1795 def savecommitmessage(self, text):
1795 def savecommitmessage(self, text):
1796 fp = self.opener('last-message.txt', 'wb')
1796 fp = self.opener('last-message.txt', 'wb')
1797 try:
1797 try:
1798 fp.write(text)
1798 fp.write(text)
1799 finally:
1799 finally:
1800 fp.close()
1800 fp.close()
1801 return self.pathto(fp.name[len(self.root) + 1:])
1801 return self.pathto(fp.name[len(self.root) + 1:])
1802
1802
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) in *files*.

    Intended to run after a transaction closes; a missing journal file
    (OSError on rename) is silently ignored.
    """
    renamefiles = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
1813
1813
def undoname(fn):
    """Map a journal file path to its 'undo' counterpart.

    Only the first 'journal' occurrence in the basename is replaced.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1818
1818
def instance(ui, path, create):
    """Repository factory entry point: open/create a local repository."""
    # Normalize file:// style URLs to a plain filesystem path first.
    return localrepository(ui, util.urllocalpath(path), create)
1821
1821
def islocal(path):
    """Local repositories are, by definition, always local."""
    return True
@@ -1,313 +1,328 b''
1 #require serve
1 #require serve
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo>foo
5 $ echo foo>foo
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 $ echo foo>foo.d/foo
7 $ echo foo>foo.d/foo
8 $ echo bar>foo.d/bAr.hg.d/BaR
8 $ echo bar>foo.d/bAr.hg.d/BaR
9 $ echo bar>foo.d/baR.d.hg/bAR
9 $ echo bar>foo.d/baR.d.hg/bAR
10 $ hg commit -A -m 1
10 $ hg commit -A -m 1
11 adding foo
11 adding foo
12 adding foo.d/bAr.hg.d/BaR
12 adding foo.d/bAr.hg.d/BaR
13 adding foo.d/baR.d.hg/bAR
13 adding foo.d/baR.d.hg/bAR
14 adding foo.d/foo
14 adding foo.d/foo
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 $ hg --config server.uncompressed=False serve -p $HGPORT1 -d --pid-file=../hg2.pid
16 $ hg --config server.uncompressed=False serve -p $HGPORT1 -d --pid-file=../hg2.pid
17
17
18 Test server address cannot be reused
18 Test server address cannot be reused
19
19
20 #if windows
20 #if windows
21 $ hg serve -p $HGPORT1 2>&1
21 $ hg serve -p $HGPORT1 2>&1
22 abort: cannot start server at ':$HGPORT1': * (glob)
22 abort: cannot start server at ':$HGPORT1': * (glob)
23 [255]
23 [255]
24 #else
24 #else
25 $ hg serve -p $HGPORT1 2>&1
25 $ hg serve -p $HGPORT1 2>&1
26 abort: cannot start server at ':$HGPORT1': Address already in use
26 abort: cannot start server at ':$HGPORT1': Address already in use
27 [255]
27 [255]
28 #endif
28 #endif
29 $ cd ..
29 $ cd ..
30 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
30 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
31
31
32 clone via stream
32 clone via stream
33
33
34 $ hg clone --uncompressed http://localhost:$HGPORT/ copy 2>&1
34 $ hg clone --uncompressed http://localhost:$HGPORT/ copy 2>&1
35 streaming all changes
35 streaming all changes
36 6 files to transfer, 606 bytes of data
36 6 files to transfer, 606 bytes of data
37 transferred * bytes in * seconds (*/sec) (glob)
37 transferred * bytes in * seconds (*/sec) (glob)
38 searching for changes
38 searching for changes
39 no changes found
39 no changes found
40 updating to branch default
40 updating to branch default
41 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 $ hg verify -R copy
42 $ hg verify -R copy
43 checking changesets
43 checking changesets
44 checking manifests
44 checking manifests
45 crosschecking files in changesets and manifests
45 crosschecking files in changesets and manifests
46 checking files
46 checking files
47 4 files, 1 changesets, 4 total revisions
47 4 files, 1 changesets, 4 total revisions
48
48
49 try to clone via stream, should use pull instead
49 try to clone via stream, should use pull instead
50
50
51 $ hg clone --uncompressed http://localhost:$HGPORT1/ copy2
51 $ hg clone --uncompressed http://localhost:$HGPORT1/ copy2
52 requesting all changes
52 requesting all changes
53 adding changesets
53 adding changesets
54 adding manifests
54 adding manifests
55 adding file changes
55 adding file changes
56 added 1 changesets with 4 changes to 4 files
56 added 1 changesets with 4 changes to 4 files
57 updating to branch default
57 updating to branch default
58 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
59
59
60 clone via pull
60 clone via pull
61
61
62 $ hg clone http://localhost:$HGPORT1/ copy-pull
62 $ hg clone http://localhost:$HGPORT1/ copy-pull
63 requesting all changes
63 requesting all changes
64 adding changesets
64 adding changesets
65 adding manifests
65 adding manifests
66 adding file changes
66 adding file changes
67 added 1 changesets with 4 changes to 4 files
67 added 1 changesets with 4 changes to 4 files
68 updating to branch default
68 updating to branch default
69 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 $ hg verify -R copy-pull
70 $ hg verify -R copy-pull
71 checking changesets
71 checking changesets
72 checking manifests
72 checking manifests
73 crosschecking files in changesets and manifests
73 crosschecking files in changesets and manifests
74 checking files
74 checking files
75 4 files, 1 changesets, 4 total revisions
75 4 files, 1 changesets, 4 total revisions
76 $ cd test
76 $ cd test
77 $ echo bar > bar
77 $ echo bar > bar
78 $ hg commit -A -d '1 0' -m 2
78 $ hg commit -A -d '1 0' -m 2
79 adding bar
79 adding bar
80 $ cd ..
80 $ cd ..
81
81
82 clone over http with --update
82 clone over http with --update
83
83
84 $ hg clone http://localhost:$HGPORT1/ updated --update 0
84 $ hg clone http://localhost:$HGPORT1/ updated --update 0
85 requesting all changes
85 requesting all changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 2 changesets with 5 changes to 5 files
89 added 2 changesets with 5 changes to 5 files
90 updating to branch default
90 updating to branch default
91 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
91 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 $ hg log -r . -R updated
92 $ hg log -r . -R updated
93 changeset: 0:8b6053c928fe
93 changeset: 0:8b6053c928fe
94 user: test
94 user: test
95 date: Thu Jan 01 00:00:00 1970 +0000
95 date: Thu Jan 01 00:00:00 1970 +0000
96 summary: 1
96 summary: 1
97
97
98 $ rm -rf updated
98 $ rm -rf updated
99
99
100 incoming via HTTP
100 incoming via HTTP
101
101
102 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
102 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
103 adding changesets
103 adding changesets
104 adding manifests
104 adding manifests
105 adding file changes
105 adding file changes
106 added 1 changesets with 4 changes to 4 files
106 added 1 changesets with 4 changes to 4 files
107 updating to branch default
107 updating to branch default
108 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 $ cd partial
109 $ cd partial
110 $ touch LOCAL
110 $ touch LOCAL
111 $ hg ci -qAm LOCAL
111 $ hg ci -qAm LOCAL
112 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
112 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
113 comparing with http://localhost:$HGPORT1/
113 comparing with http://localhost:$HGPORT1/
114 searching for changes
114 searching for changes
115 2
115 2
116 $ cd ..
116 $ cd ..
117
117
118 pull
118 pull
119
119
120 $ cd copy-pull
120 $ cd copy-pull
121 $ echo '[hooks]' >> .hg/hgrc
121 $ echo '[hooks]' >> .hg/hgrc
122 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup" >> .hg/hgrc
122 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup" >> .hg/hgrc
123 $ hg pull
123 $ hg pull
124 pulling from http://localhost:$HGPORT1/
124 pulling from http://localhost:$HGPORT1/
125 searching for changes
125 searching for changes
126 adding changesets
126 adding changesets
127 adding manifests
127 adding manifests
128 adding file changes
128 adding file changes
129 added 1 changesets with 1 changes to 1 files
129 added 1 changesets with 1 changes to 1 files
130 changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_URL=http://localhost:$HGPORT1/
130 changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_URL=http://localhost:$HGPORT1/
131 (run 'hg update' to get a working copy)
131 (run 'hg update' to get a working copy)
132 $ cd ..
132 $ cd ..
133
133
134 clone from invalid URL
134 clone from invalid URL
135
135
136 $ hg clone http://localhost:$HGPORT/bad
136 $ hg clone http://localhost:$HGPORT/bad
137 abort: HTTP Error 404: Not Found
137 abort: HTTP Error 404: Not Found
138 [255]
138 [255]
139
139
140 test http authentication
140 test http authentication
141 + use the same server to test server side streaming preference
141 + use the same server to test server side streaming preference
142
142
143 $ cd test
143 $ cd test
144 $ cat << EOT > userpass.py
144 $ cat << EOT > userpass.py
145 > import base64
145 > import base64
146 > from mercurial.hgweb import common
146 > from mercurial.hgweb import common
147 > def perform_authentication(hgweb, req, op):
147 > def perform_authentication(hgweb, req, op):
148 > auth = req.env.get('HTTP_AUTHORIZATION')
148 > auth = req.env.get('HTTP_AUTHORIZATION')
149 > if not auth:
149 > if not auth:
150 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
150 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
151 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
151 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
152 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
152 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
153 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
153 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
154 > def extsetup():
154 > def extsetup():
155 > common.permhooks.insert(0, perform_authentication)
155 > common.permhooks.insert(0, perform_authentication)
156 > EOT
156 > EOT
157 $ hg --config extensions.x=userpass.py serve -p $HGPORT2 -d --pid-file=pid \
157 $ hg --config extensions.x=userpass.py serve -p $HGPORT2 -d --pid-file=pid \
158 > --config server.preferuncompressed=True \
158 > --config server.preferuncompressed=True \
159 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
159 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
160 $ cat pid >> $DAEMON_PIDS
160 $ cat pid >> $DAEMON_PIDS
161
161
162 $ cat << EOF > get_pass.py
162 $ cat << EOF > get_pass.py
163 > import getpass
163 > import getpass
164 > def newgetpass(arg):
164 > def newgetpass(arg):
165 > return "pass"
165 > return "pass"
166 > getpass.getpass = newgetpass
166 > getpass.getpass = newgetpass
167 > EOF
167 > EOF
168
168
169 #if python243
169 #if python243
170 $ hg id http://localhost:$HGPORT2/
170 $ hg id http://localhost:$HGPORT2/
171 abort: http authorization required for http://localhost:$HGPORT2/
171 abort: http authorization required for http://localhost:$HGPORT2/
172 [255]
172 [255]
173 $ hg id http://localhost:$HGPORT2/
173 $ hg id http://localhost:$HGPORT2/
174 abort: http authorization required for http://localhost:$HGPORT2/
174 abort: http authorization required for http://localhost:$HGPORT2/
175 [255]
175 [255]
176 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
176 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
177 http authorization required for http://localhost:$HGPORT2/
177 http authorization required for http://localhost:$HGPORT2/
178 realm: mercurial
178 realm: mercurial
179 user: user
179 user: user
180 password: 5fed3813f7f5
180 password: 5fed3813f7f5
181 $ hg id http://user:pass@localhost:$HGPORT2/
181 $ hg id http://user:pass@localhost:$HGPORT2/
182 5fed3813f7f5
182 5fed3813f7f5
183 #endif
183 #endif
184 $ echo '[auth]' >> .hg/hgrc
184 $ echo '[auth]' >> .hg/hgrc
185 $ echo 'l.schemes=http' >> .hg/hgrc
185 $ echo 'l.schemes=http' >> .hg/hgrc
186 $ echo 'l.prefix=lo' >> .hg/hgrc
186 $ echo 'l.prefix=lo' >> .hg/hgrc
187 $ echo 'l.username=user' >> .hg/hgrc
187 $ echo 'l.username=user' >> .hg/hgrc
188 $ echo 'l.password=pass' >> .hg/hgrc
188 $ echo 'l.password=pass' >> .hg/hgrc
189 $ hg id http://localhost:$HGPORT2/
189 $ hg id http://localhost:$HGPORT2/
190 5fed3813f7f5
190 5fed3813f7f5
191 $ hg id http://localhost:$HGPORT2/
191 $ hg id http://localhost:$HGPORT2/
192 5fed3813f7f5
192 5fed3813f7f5
193 $ hg id http://user@localhost:$HGPORT2/
193 $ hg id http://user@localhost:$HGPORT2/
194 5fed3813f7f5
194 5fed3813f7f5
195 #if python243
195 #if python243
196 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
196 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
197 streaming all changes
197 streaming all changes
198 7 files to transfer, 916 bytes of data
198 7 files to transfer, 916 bytes of data
199 transferred * bytes in * seconds (*/sec) (glob)
199 transferred * bytes in * seconds (*/sec) (glob)
200 searching for changes
200 searching for changes
201 no changes found
201 no changes found
202 updating to branch default
202 updating to branch default
203 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 --pull should override server's preferuncompressed
205 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
206 requesting all changes
207 adding changesets
208 adding manifests
209 adding file changes
210 added 2 changesets with 5 changes to 5 files
211 updating to branch default
212 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
204
213
205 $ hg id http://user2@localhost:$HGPORT2/
214 $ hg id http://user2@localhost:$HGPORT2/
206 abort: http authorization required for http://localhost:$HGPORT2/
215 abort: http authorization required for http://localhost:$HGPORT2/
207 [255]
216 [255]
208 $ hg id http://user:pass2@localhost:$HGPORT2/
217 $ hg id http://user:pass2@localhost:$HGPORT2/
209 abort: HTTP Error 403: no
218 abort: HTTP Error 403: no
210 [255]
219 [255]
211
220
212 $ hg -R dest tag -r tip top
221 $ hg -R dest tag -r tip top
213 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
222 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
214 pushing to http://user:***@localhost:$HGPORT2/
223 pushing to http://user:***@localhost:$HGPORT2/
215 searching for changes
224 searching for changes
216 remote: adding changesets
225 remote: adding changesets
217 remote: adding manifests
226 remote: adding manifests
218 remote: adding file changes
227 remote: adding file changes
219 remote: added 1 changesets with 1 changes to 1 files
228 remote: added 1 changesets with 1 changes to 1 files
220 $ hg rollback -q
229 $ hg rollback -q
221
230
222 $ cut -c38- ../access.log
231 $ cut -c38- ../access.log
223 "GET /?cmd=capabilities HTTP/1.1" 200 -
232 "GET /?cmd=capabilities HTTP/1.1" 200 -
224 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
233 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
225 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
234 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
226 "GET /?cmd=capabilities HTTP/1.1" 200 -
235 "GET /?cmd=capabilities HTTP/1.1" 200 -
227 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
236 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
228 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
237 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
229 "GET /?cmd=capabilities HTTP/1.1" 200 -
238 "GET /?cmd=capabilities HTTP/1.1" 200 -
230 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
239 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
231 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
240 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
232 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
241 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
233 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
242 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
234 "GET /?cmd=capabilities HTTP/1.1" 200 -
243 "GET /?cmd=capabilities HTTP/1.1" 200 -
235 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
244 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
236 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
245 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
237 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
246 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
238 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
247 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
239 "GET /?cmd=capabilities HTTP/1.1" 200 -
248 "GET /?cmd=capabilities HTTP/1.1" 200 -
240 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
249 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
241 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
250 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
242 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
251 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
243 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
252 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
244 "GET /?cmd=capabilities HTTP/1.1" 200 -
253 "GET /?cmd=capabilities HTTP/1.1" 200 -
245 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
254 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
246 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
255 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
247 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
256 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
248 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
257 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
249 "GET /?cmd=capabilities HTTP/1.1" 200 -
258 "GET /?cmd=capabilities HTTP/1.1" 200 -
250 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
259 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
251 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
260 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
252 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
261 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
253 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
262 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
254 "GET /?cmd=capabilities HTTP/1.1" 200 -
263 "GET /?cmd=capabilities HTTP/1.1" 200 -
255 "GET /?cmd=branchmap HTTP/1.1" 200 -
264 "GET /?cmd=branchmap HTTP/1.1" 200 -
256 "GET /?cmd=stream_out HTTP/1.1" 401 -
265 "GET /?cmd=stream_out HTTP/1.1" 401 -
257 "GET /?cmd=stream_out HTTP/1.1" 200 -
266 "GET /?cmd=stream_out HTTP/1.1" 200 -
258 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
267 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
259 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d
268 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d
260 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
269 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
261 "GET /?cmd=capabilities HTTP/1.1" 200 -
270 "GET /?cmd=capabilities HTTP/1.1" 200 -
271 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks
272 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
273 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D
274 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
276 "GET /?cmd=capabilities HTTP/1.1" 200 -
262 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
277 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
263 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
278 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
264 "GET /?cmd=capabilities HTTP/1.1" 200 -
279 "GET /?cmd=capabilities HTTP/1.1" 200 -
265 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
280 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
266 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
281 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
267 "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces
282 "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces
268 "GET /?cmd=capabilities HTTP/1.1" 200 -
283 "GET /?cmd=capabilities HTTP/1.1" 200 -
269 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872
284 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872
270 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases
285 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases
271 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
286 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
272 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
287 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
273 "GET /?cmd=branchmap HTTP/1.1" 200 -
288 "GET /?cmd=branchmap HTTP/1.1" 200 -
274 "GET /?cmd=branchmap HTTP/1.1" 200 -
289 "GET /?cmd=branchmap HTTP/1.1" 200 -
275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
290 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
276 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524
291 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524
277 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
292 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
278
293
279 #endif
294 #endif
280 $ cd ..
295 $ cd ..
281
296
282 clone of serve with repo in root and unserved subrepo (issue2970)
297 clone of serve with repo in root and unserved subrepo (issue2970)
283
298
284 $ hg --cwd test init sub
299 $ hg --cwd test init sub
285 $ echo empty > test/sub/empty
300 $ echo empty > test/sub/empty
286 $ hg --cwd test/sub add empty
301 $ hg --cwd test/sub add empty
287 $ hg --cwd test/sub commit -qm 'add empty'
302 $ hg --cwd test/sub commit -qm 'add empty'
288 $ hg --cwd test/sub tag -r 0 something
303 $ hg --cwd test/sub tag -r 0 something
289 $ echo sub = sub > test/.hgsub
304 $ echo sub = sub > test/.hgsub
290 $ hg --cwd test add .hgsub
305 $ hg --cwd test add .hgsub
291 $ hg --cwd test commit -qm 'add subrepo'
306 $ hg --cwd test commit -qm 'add subrepo'
292 $ hg clone http://localhost:$HGPORT noslash-clone
307 $ hg clone http://localhost:$HGPORT noslash-clone
293 requesting all changes
308 requesting all changes
294 adding changesets
309 adding changesets
295 adding manifests
310 adding manifests
296 adding file changes
311 adding file changes
297 added 3 changesets with 7 changes to 7 files
312 added 3 changesets with 7 changes to 7 files
298 updating to branch default
313 updating to branch default
299 abort: HTTP Error 404: Not Found
314 abort: HTTP Error 404: Not Found
300 [255]
315 [255]
301 $ hg clone http://localhost:$HGPORT/ slash-clone
316 $ hg clone http://localhost:$HGPORT/ slash-clone
302 requesting all changes
317 requesting all changes
303 adding changesets
318 adding changesets
304 adding manifests
319 adding manifests
305 adding file changes
320 adding file changes
306 added 3 changesets with 7 changes to 7 files
321 added 3 changesets with 7 changes to 7 files
307 updating to branch default
322 updating to branch default
308 abort: HTTP Error 404: Not Found
323 abort: HTTP Error 404: Not Found
309 [255]
324 [255]
310
325
311 check error log
326 check error log
312
327
313 $ cat error.log
328 $ cat error.log
General Comments 0
You need to be logged in to leave comments. Login now