bundle2-localpeer: properly propagate the server output on error (issue4594)...
Pierre-Yves David
r24799:d99d7e3f default
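The hunk below spans the whole file (1940 lines before, 1955 after); the only functional change is the fifteen-line addition in localpeer.unbundle() (new lines 139-153), which replays any server output salvaged from a failed bundle2 reply so it reaches the user before the error does. Distilled into a standalone sketch for reference -- the helper name and the free-standing form are illustrative, not part of the patch itself:

    # Illustrative sketch of the replay pattern this patch adds to
    # localpeer.unbundle(); 'repo' and 'exc' are stand-in names.
    from mercurial import bundle2, util

    def replaysalvagedoutput(repo, exc):
        # server output salvaged from a failed bundle2 reply, if any
        output = getattr(exc, '_bundle2salvagedoutput', ())
        if output:
            bundler = bundle2.bundle20(repo.ui)
            for out in output:
                bundler.addpart(out)
            # round-trip the parts through a stream so the normal bundle2
            # machinery prints them on the ui before the failure propagates
            stream = util.chunkbuffer(bundler.getchunks())
            bundle2.processbundle(repo, bundle2.getunbundler(repo.ui, stream))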
@@ -1,1940 +1,1955 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, short
 from i18n import _
 import urllib
 import peer, changegroup, subrepo, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
 import transaction, store, encoding, exchange, bundle2
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 import branchmap, pathutil
 import namespaces
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class repofilecache(filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
 
     def __get__(self, repo, type=None):
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())
 
 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 class unfilteredpropertycache(propertycache):
     """propertycache that apply to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 class filteredpropertycache(propertycache):
     """propertycache that must take filtering in account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                   'unbundle'))
 legacycaps = moderncaps.union(set(['changegroupsubset']))
 
 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats
 
     def close(self):
         self._repo.close()
 
     def _capabilities(self):
         return self._caps
 
     def local(self):
         return self._repo
 
     def canpush(self):
         return True
 
     def url(self):
         return self._repo.url()
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         cg = exchange.getbundle(self._repo, source, heads=heads,
                                 common=common, bundlecaps=bundlecaps, **kwargs)
         if bundlecaps is not None and 'HG20' in bundlecaps:
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             cg = bundle2.getunbundler(self.ui, cg)
         return cg
 
     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.
 
     def unbundle(self, cg, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             try:
                 cg = exchange.readbundle(self.ui, cg, None)
                 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception, exc:
+                # If the exception contains output salvaged from a bundle2
+                # reply, we need to make sure it is printed before continuing
+                # to fail. So we build a bundle2 with such output and consume
+                # it directly.
+                #
+                # This is not very elegant but allows a "simple" solution for
+                # issue4594
+                output = getattr(exc, '_bundle2salvagedoutput', ())
+                if output:
+                    bundler = bundle2.bundle20(self._repo.ui)
+                    for out in output:
+                        bundler.addpart(out)
+                    stream = util.chunkbuffer(bundler.getchunks())
+                    b = bundle2.getunbundler(self.ui, stream)
+                    bundle2.processbundle(self._repo, b)
                 raise
140 except error.PushRaced, exc:
155 except error.PushRaced, exc:
141 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
142
157
143 def lock(self):
158 def lock(self):
144 return self._repo.lock()
159 return self._repo.lock()
145
160
146 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
147 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
148
163
149 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
150 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
151
166
152 def listkeys(self, namespace):
167 def listkeys(self, namespace):
153 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
154
169
155 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
156 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
157 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
158
173
159 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
160 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
161 restricted capabilities'''
176 restricted capabilities'''
162
177
163 def __init__(self, repo):
178 def __init__(self, repo):
164 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
165
180
166 def branches(self, nodes):
181 def branches(self, nodes):
167 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
168
183
169 def between(self, pairs):
184 def between(self, pairs):
170 return self._repo.between(pairs)
185 return self._repo.between(pairs)
171
186
172 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
173 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
174
189
175 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
176 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
177
192
178 class localrepository(object):
193 class localrepository(object):
179
194
180 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
181 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
182 'dotencode'))
197 'dotencode'))
183 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
184 requirements = ['revlogv1']
199 requirements = ['revlogv1']
185 filtername = None
200 filtername = None
186
201
187 # a list of (ui, featureset) functions.
202 # a list of (ui, featureset) functions.
188 # only functions defined in module of enabled extensions are invoked
203 # only functions defined in module of enabled extensions are invoked
189 featuresetupfuncs = set()
204 featuresetupfuncs = set()
190
205
191 def _baserequirements(self, create):
206 def _baserequirements(self, create):
192 return self.requirements[:]
207 return self.requirements[:]
193
208
194 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
195 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
196 self.wopener = self.wvfs
211 self.wopener = self.wvfs
197 self.root = self.wvfs.base
212 self.root = self.wvfs.base
198 self.path = self.wvfs.join(".hg")
213 self.path = self.wvfs.join(".hg")
199 self.origroot = path
214 self.origroot = path
200 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
201 self.vfs = scmutil.vfs(self.path)
216 self.vfs = scmutil.vfs(self.path)
202 self.opener = self.vfs
217 self.opener = self.vfs
203 self.baseui = baseui
218 self.baseui = baseui
204 self.ui = baseui.copy()
219 self.ui = baseui.copy()
205 self.ui.copy = baseui.copy # prevent copying repo configuration
220 self.ui.copy = baseui.copy # prevent copying repo configuration
206 # A list of callback to shape the phase if no data were found.
221 # A list of callback to shape the phase if no data were found.
207 # Callback are in the form: func(repo, roots) --> processed root.
222 # Callback are in the form: func(repo, roots) --> processed root.
208 # This list it to be filled by extension during repo setup
223 # This list it to be filled by extension during repo setup
209 self._phasedefaults = []
224 self._phasedefaults = []
210 try:
225 try:
211 self.ui.readconfig(self.join("hgrc"), self.root)
226 self.ui.readconfig(self.join("hgrc"), self.root)
212 extensions.loadall(self.ui)
227 extensions.loadall(self.ui)
213 except IOError:
228 except IOError:
214 pass
229 pass
215
230
216 if self.featuresetupfuncs:
231 if self.featuresetupfuncs:
217 self.supported = set(self._basesupported) # use private copy
232 self.supported = set(self._basesupported) # use private copy
218 extmods = set(m.__name__ for n, m
233 extmods = set(m.__name__ for n, m
219 in extensions.extensions(self.ui))
234 in extensions.extensions(self.ui))
220 for setupfunc in self.featuresetupfuncs:
235 for setupfunc in self.featuresetupfuncs:
221 if setupfunc.__module__ in extmods:
236 if setupfunc.__module__ in extmods:
222 setupfunc(self.ui, self.supported)
237 setupfunc(self.ui, self.supported)
223 else:
238 else:
224 self.supported = self._basesupported
239 self.supported = self._basesupported
225
240
226 if not self.vfs.isdir():
241 if not self.vfs.isdir():
227 if create:
242 if create:
228 if not self.wvfs.exists():
243 if not self.wvfs.exists():
229 self.wvfs.makedirs()
244 self.wvfs.makedirs()
230 self.vfs.makedir(notindexed=True)
245 self.vfs.makedir(notindexed=True)
231 requirements = self._baserequirements(create)
246 requirements = self._baserequirements(create)
232 if self.ui.configbool('format', 'usestore', True):
247 if self.ui.configbool('format', 'usestore', True):
233 self.vfs.mkdir("store")
248 self.vfs.mkdir("store")
234 requirements.append("store")
249 requirements.append("store")
235 if self.ui.configbool('format', 'usefncache', True):
250 if self.ui.configbool('format', 'usefncache', True):
236 requirements.append("fncache")
251 requirements.append("fncache")
237 if self.ui.configbool('format', 'dotencode', True):
252 if self.ui.configbool('format', 'dotencode', True):
238 requirements.append('dotencode')
253 requirements.append('dotencode')
239 # create an invalid changelog
254 # create an invalid changelog
240 self.vfs.append(
255 self.vfs.append(
241 "00changelog.i",
256 "00changelog.i",
242 '\0\0\0\2' # represents revlogv2
257 '\0\0\0\2' # represents revlogv2
243 ' dummy changelog to prevent using the old repo layout'
258 ' dummy changelog to prevent using the old repo layout'
244 )
259 )
245 if self.ui.configbool('format', 'generaldelta', False):
260 if self.ui.configbool('format', 'generaldelta', False):
246 requirements.append("generaldelta")
261 requirements.append("generaldelta")
247 if self.ui.configbool('experimental', 'manifestv2', False):
262 if self.ui.configbool('experimental', 'manifestv2', False):
248 requirements.append("manifestv2")
263 requirements.append("manifestv2")
249 requirements = set(requirements)
264 requirements = set(requirements)
250 else:
265 else:
251 raise error.RepoError(_("repository %s not found") % path)
266 raise error.RepoError(_("repository %s not found") % path)
252 elif create:
267 elif create:
253 raise error.RepoError(_("repository %s already exists") % path)
268 raise error.RepoError(_("repository %s already exists") % path)
254 else:
269 else:
255 try:
270 try:
256 requirements = scmutil.readrequires(self.vfs, self.supported)
271 requirements = scmutil.readrequires(self.vfs, self.supported)
257 except IOError, inst:
272 except IOError, inst:
258 if inst.errno != errno.ENOENT:
273 if inst.errno != errno.ENOENT:
259 raise
274 raise
260 requirements = set()
275 requirements = set()
261
276
262 self.sharedpath = self.path
277 self.sharedpath = self.path
263 try:
278 try:
264 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
265 realpath=True)
280 realpath=True)
266 s = vfs.base
281 s = vfs.base
267 if not vfs.exists():
282 if not vfs.exists():
268 raise error.RepoError(
283 raise error.RepoError(
269 _('.hg/sharedpath points to nonexistent directory %s') % s)
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
270 self.sharedpath = s
285 self.sharedpath = s
271 except IOError, inst:
286 except IOError, inst:
272 if inst.errno != errno.ENOENT:
287 if inst.errno != errno.ENOENT:
273 raise
288 raise
274
289
275 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
276 self.spath = self.store.path
291 self.spath = self.store.path
277 self.svfs = self.store.vfs
292 self.svfs = self.store.vfs
278 self.sopener = self.svfs
293 self.sopener = self.svfs
279 self.sjoin = self.store.join
294 self.sjoin = self.store.join
280 self.vfs.createmode = self.store.createmode
295 self.vfs.createmode = self.store.createmode
281 self._applyrequirements(requirements)
296 self._applyrequirements(requirements)
282 if create:
297 if create:
283 self._writerequirements()
298 self._writerequirements()
284
299
285
300
286 self._branchcaches = {}
301 self._branchcaches = {}
287 self._revbranchcache = None
302 self._revbranchcache = None
288 self.filterpats = {}
303 self.filterpats = {}
289 self._datafilters = {}
304 self._datafilters = {}
290 self._transref = self._lockref = self._wlockref = None
305 self._transref = self._lockref = self._wlockref = None
291
306
292 # A cache for various files under .hg/ that tracks file changes,
307 # A cache for various files under .hg/ that tracks file changes,
293 # (used by the filecache decorator)
308 # (used by the filecache decorator)
294 #
309 #
295 # Maps a property name to its util.filecacheentry
310 # Maps a property name to its util.filecacheentry
296 self._filecache = {}
311 self._filecache = {}
297
312
298 # hold sets of revision to be filtered
313 # hold sets of revision to be filtered
299 # should be cleared when something might have changed the filter value:
314 # should be cleared when something might have changed the filter value:
300 # - new changesets,
315 # - new changesets,
301 # - phase change,
316 # - phase change,
302 # - new obsolescence marker,
317 # - new obsolescence marker,
303 # - working directory parent change,
318 # - working directory parent change,
304 # - bookmark changes
319 # - bookmark changes
305 self.filteredrevcache = {}
320 self.filteredrevcache = {}
306
321
307 # generic mapping between names and nodes
322 # generic mapping between names and nodes
308 self.names = namespaces.namespaces()
323 self.names = namespaces.namespaces()
309
324
310 def close(self):
325 def close(self):
311 self._writecaches()
326 self._writecaches()
312
327
313 def _writecaches(self):
328 def _writecaches(self):
314 if self._revbranchcache:
329 if self._revbranchcache:
315 self._revbranchcache.write()
330 self._revbranchcache.write()
316
331
317 def _restrictcapabilities(self, caps):
332 def _restrictcapabilities(self, caps):
318 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
319 caps = set(caps)
334 caps = set(caps)
320 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
321 caps.add('bundle2=' + urllib.quote(capsblob))
336 caps.add('bundle2=' + urllib.quote(capsblob))
322 return caps
337 return caps
323
338
324 def _applyrequirements(self, requirements):
339 def _applyrequirements(self, requirements):
325 self.requirements = requirements
340 self.requirements = requirements
326 self.svfs.options = dict((r, 1) for r in requirements
341 self.svfs.options = dict((r, 1) for r in requirements
327 if r in self.openerreqs)
342 if r in self.openerreqs)
328 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
329 if chunkcachesize is not None:
344 if chunkcachesize is not None:
330 self.svfs.options['chunkcachesize'] = chunkcachesize
345 self.svfs.options['chunkcachesize'] = chunkcachesize
331 maxchainlen = self.ui.configint('format', 'maxchainlen')
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
332 if maxchainlen is not None:
347 if maxchainlen is not None:
333 self.svfs.options['maxchainlen'] = maxchainlen
348 self.svfs.options['maxchainlen'] = maxchainlen
334 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
335 if manifestcachesize is not None:
350 if manifestcachesize is not None:
336 self.svfs.options['manifestcachesize'] = manifestcachesize
351 self.svfs.options['manifestcachesize'] = manifestcachesize
337 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
338 if usetreemanifest is not None:
353 if usetreemanifest is not None:
339 self.svfs.options['usetreemanifest'] = usetreemanifest
354 self.svfs.options['usetreemanifest'] = usetreemanifest
340
355
341 def _writerequirements(self):
356 def _writerequirements(self):
342 reqfile = self.vfs("requires", "w")
357 reqfile = self.vfs("requires", "w")
343 for r in sorted(self.requirements):
358 for r in sorted(self.requirements):
344 reqfile.write("%s\n" % r)
359 reqfile.write("%s\n" % r)
345 reqfile.close()
360 reqfile.close()
346
361
347 def _checknested(self, path):
362 def _checknested(self, path):
348 """Determine if path is a legal nested repository."""
363 """Determine if path is a legal nested repository."""
349 if not path.startswith(self.root):
364 if not path.startswith(self.root):
350 return False
365 return False
351 subpath = path[len(self.root) + 1:]
366 subpath = path[len(self.root) + 1:]
352 normsubpath = util.pconvert(subpath)
367 normsubpath = util.pconvert(subpath)
353
368
354 # XXX: Checking against the current working copy is wrong in
369 # XXX: Checking against the current working copy is wrong in
355 # the sense that it can reject things like
370 # the sense that it can reject things like
356 #
371 #
357 # $ hg cat -r 10 sub/x.txt
372 # $ hg cat -r 10 sub/x.txt
358 #
373 #
359 # if sub/ is no longer a subrepository in the working copy
374 # if sub/ is no longer a subrepository in the working copy
360 # parent revision.
375 # parent revision.
361 #
376 #
362 # However, it can of course also allow things that would have
377 # However, it can of course also allow things that would have
363 # been rejected before, such as the above cat command if sub/
378 # been rejected before, such as the above cat command if sub/
364 # is a subrepository now, but was a normal directory before.
379 # is a subrepository now, but was a normal directory before.
365 # The old path auditor would have rejected by mistake since it
380 # The old path auditor would have rejected by mistake since it
366 # panics when it sees sub/.hg/.
381 # panics when it sees sub/.hg/.
367 #
382 #
368 # All in all, checking against the working copy seems sensible
383 # All in all, checking against the working copy seems sensible
369 # since we want to prevent access to nested repositories on
384 # since we want to prevent access to nested repositories on
370 # the filesystem *now*.
385 # the filesystem *now*.
371 ctx = self[None]
386 ctx = self[None]
372 parts = util.splitpath(subpath)
387 parts = util.splitpath(subpath)
373 while parts:
388 while parts:
374 prefix = '/'.join(parts)
389 prefix = '/'.join(parts)
375 if prefix in ctx.substate:
390 if prefix in ctx.substate:
376 if prefix == normsubpath:
391 if prefix == normsubpath:
377 return True
392 return True
378 else:
393 else:
379 sub = ctx.sub(prefix)
394 sub = ctx.sub(prefix)
380 return sub.checknested(subpath[len(prefix) + 1:])
395 return sub.checknested(subpath[len(prefix) + 1:])
381 else:
396 else:
382 parts.pop()
397 parts.pop()
383 return False
398 return False
384
399
385 def peer(self):
400 def peer(self):
386 return localpeer(self) # not cached to avoid reference cycle
401 return localpeer(self) # not cached to avoid reference cycle
387
402
388 def unfiltered(self):
403 def unfiltered(self):
389 """Return unfiltered version of the repository
404 """Return unfiltered version of the repository
390
405
391 Intended to be overwritten by filtered repo."""
406 Intended to be overwritten by filtered repo."""
392 return self
407 return self
393
408
394 def filtered(self, name):
409 def filtered(self, name):
395 """Return a filtered version of a repository"""
410 """Return a filtered version of a repository"""
396 # build a new class with the mixin and the current class
411 # build a new class with the mixin and the current class
397 # (possibly subclass of the repo)
412 # (possibly subclass of the repo)
398 class proxycls(repoview.repoview, self.unfiltered().__class__):
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
399 pass
414 pass
400 return proxycls(self, name)
415 return proxycls(self, name)
401
416
402 @repofilecache('bookmarks')
417 @repofilecache('bookmarks')
403 def _bookmarks(self):
418 def _bookmarks(self):
404 return bookmarks.bmstore(self)
419 return bookmarks.bmstore(self)
405
420
406 @repofilecache('bookmarks.current')
421 @repofilecache('bookmarks.current')
407 def _bookmarkcurrent(self):
422 def _bookmarkcurrent(self):
408 return bookmarks.readcurrent(self)
423 return bookmarks.readcurrent(self)
409
424
410 def bookmarkheads(self, bookmark):
425 def bookmarkheads(self, bookmark):
411 name = bookmark.split('@', 1)[0]
426 name = bookmark.split('@', 1)[0]
412 heads = []
427 heads = []
413 for mark, n in self._bookmarks.iteritems():
428 for mark, n in self._bookmarks.iteritems():
414 if mark.split('@', 1)[0] == name:
429 if mark.split('@', 1)[0] == name:
415 heads.append(n)
430 heads.append(n)
416 return heads
431 return heads
417
432
418 @storecache('phaseroots')
433 @storecache('phaseroots')
419 def _phasecache(self):
434 def _phasecache(self):
420 return phases.phasecache(self, self._phasedefaults)
435 return phases.phasecache(self, self._phasedefaults)
421
436
422 @storecache('obsstore')
437 @storecache('obsstore')
423 def obsstore(self):
438 def obsstore(self):
424 # read default format for new obsstore.
439 # read default format for new obsstore.
425 defaultformat = self.ui.configint('format', 'obsstore-version', None)
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
426 # rely on obsstore class default when possible.
441 # rely on obsstore class default when possible.
427 kwargs = {}
442 kwargs = {}
428 if defaultformat is not None:
443 if defaultformat is not None:
429 kwargs['defaultformat'] = defaultformat
444 kwargs['defaultformat'] = defaultformat
430 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
431 store = obsolete.obsstore(self.svfs, readonly=readonly,
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
432 **kwargs)
447 **kwargs)
433 if store and readonly:
448 if store and readonly:
434 self.ui.warn(
449 self.ui.warn(
435 _('obsolete feature not enabled but %i markers found!\n')
450 _('obsolete feature not enabled but %i markers found!\n')
436 % len(list(store)))
451 % len(list(store)))
437 return store
452 return store
438
453
439 @storecache('00changelog.i')
454 @storecache('00changelog.i')
440 def changelog(self):
455 def changelog(self):
441 c = changelog.changelog(self.svfs)
456 c = changelog.changelog(self.svfs)
442 if 'HG_PENDING' in os.environ:
457 if 'HG_PENDING' in os.environ:
443 p = os.environ['HG_PENDING']
458 p = os.environ['HG_PENDING']
444 if p.startswith(self.root):
459 if p.startswith(self.root):
445 c.readpending('00changelog.i.a')
460 c.readpending('00changelog.i.a')
446 return c
461 return c
447
462
448 @storecache('00manifest.i')
463 @storecache('00manifest.i')
449 def manifest(self):
464 def manifest(self):
450 return manifest.manifest(self.svfs)
465 return manifest.manifest(self.svfs)
451
466
452 @repofilecache('dirstate')
467 @repofilecache('dirstate')
453 def dirstate(self):
468 def dirstate(self):
454 warned = [0]
469 warned = [0]
455 def validate(node):
470 def validate(node):
456 try:
471 try:
457 self.changelog.rev(node)
472 self.changelog.rev(node)
458 return node
473 return node
459 except error.LookupError:
474 except error.LookupError:
460 if not warned[0]:
475 if not warned[0]:
461 warned[0] = True
476 warned[0] = True
462 self.ui.warn(_("warning: ignoring unknown"
477 self.ui.warn(_("warning: ignoring unknown"
463 " working parent %s!\n") % short(node))
478 " working parent %s!\n") % short(node))
464 return nullid
479 return nullid
465
480
466 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
467
482
468 def __getitem__(self, changeid):
483 def __getitem__(self, changeid):
469 if changeid is None:
484 if changeid is None:
470 return context.workingctx(self)
485 return context.workingctx(self)
471 if isinstance(changeid, slice):
486 if isinstance(changeid, slice):
472 return [context.changectx(self, i)
487 return [context.changectx(self, i)
473 for i in xrange(*changeid.indices(len(self)))
488 for i in xrange(*changeid.indices(len(self)))
474 if i not in self.changelog.filteredrevs]
489 if i not in self.changelog.filteredrevs]
475 return context.changectx(self, changeid)
490 return context.changectx(self, changeid)
476
491
477 def __contains__(self, changeid):
492 def __contains__(self, changeid):
478 try:
493 try:
479 self[changeid]
494 self[changeid]
480 return True
495 return True
481 except error.RepoLookupError:
496 except error.RepoLookupError:
482 return False
497 return False
483
498
484 def __nonzero__(self):
499 def __nonzero__(self):
485 return True
500 return True
486
501
487 def __len__(self):
502 def __len__(self):
488 return len(self.changelog)
503 return len(self.changelog)
489
504
490 def __iter__(self):
505 def __iter__(self):
491 return iter(self.changelog)
506 return iter(self.changelog)
492
507
493 def revs(self, expr, *args):
508 def revs(self, expr, *args):
494 '''Return a list of revisions matching the given revset'''
509 '''Return a list of revisions matching the given revset'''
495 expr = revset.formatspec(expr, *args)
510 expr = revset.formatspec(expr, *args)
496 m = revset.match(None, expr)
511 m = revset.match(None, expr)
497 return m(self)
512 return m(self)
498
513
499 def set(self, expr, *args):
514 def set(self, expr, *args):
500 '''
515 '''
501 Yield a context for each matching revision, after doing arg
516 Yield a context for each matching revision, after doing arg
502 replacement via revset.formatspec
517 replacement via revset.formatspec
503 '''
518 '''
504 for r in self.revs(expr, *args):
519 for r in self.revs(expr, *args):
505 yield self[r]
520 yield self[r]
506
521
507 def url(self):
522 def url(self):
508 return 'file:' + self.root
523 return 'file:' + self.root
509
524
510 def hook(self, name, throw=False, **args):
525 def hook(self, name, throw=False, **args):
511 """Call a hook, passing this repo instance.
526 """Call a hook, passing this repo instance.
512
527
513 This a convenience method to aid invoking hooks. Extensions likely
528 This a convenience method to aid invoking hooks. Extensions likely
514 won't call this unless they have registered a custom hook or are
529 won't call this unless they have registered a custom hook or are
515 replacing code that is expected to call a hook.
530 replacing code that is expected to call a hook.
516 """
531 """
517 return hook.hook(self.ui, self, name, throw, **args)
532 return hook.hook(self.ui, self, name, throw, **args)
518
533
519 @unfilteredmethod
534 @unfilteredmethod
520 def _tag(self, names, node, message, local, user, date, extra={},
535 def _tag(self, names, node, message, local, user, date, extra={},
521 editor=False):
536 editor=False):
522 if isinstance(names, str):
537 if isinstance(names, str):
523 names = (names,)
538 names = (names,)
524
539
525 branches = self.branchmap()
540 branches = self.branchmap()
526 for name in names:
541 for name in names:
527 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
528 local=local)
543 local=local)
529 if name in branches:
544 if name in branches:
530 self.ui.warn(_("warning: tag %s conflicts with existing"
545 self.ui.warn(_("warning: tag %s conflicts with existing"
531 " branch name\n") % name)
546 " branch name\n") % name)
532
547
533 def writetags(fp, names, munge, prevtags):
548 def writetags(fp, names, munge, prevtags):
534 fp.seek(0, 2)
549 fp.seek(0, 2)
535 if prevtags and prevtags[-1] != '\n':
550 if prevtags and prevtags[-1] != '\n':
536 fp.write('\n')
551 fp.write('\n')
537 for name in names:
552 for name in names:
538 if munge:
553 if munge:
539 m = munge(name)
554 m = munge(name)
540 else:
555 else:
541 m = name
556 m = name
542
557
543 if (self._tagscache.tagtypes and
558 if (self._tagscache.tagtypes and
544 name in self._tagscache.tagtypes):
559 name in self._tagscache.tagtypes):
545 old = self.tags().get(name, nullid)
560 old = self.tags().get(name, nullid)
546 fp.write('%s %s\n' % (hex(old), m))
561 fp.write('%s %s\n' % (hex(old), m))
547 fp.write('%s %s\n' % (hex(node), m))
562 fp.write('%s %s\n' % (hex(node), m))
548 fp.close()
563 fp.close()
549
564
550 prevtags = ''
565 prevtags = ''
551 if local:
566 if local:
552 try:
567 try:
553 fp = self.vfs('localtags', 'r+')
568 fp = self.vfs('localtags', 'r+')
554 except IOError:
569 except IOError:
555 fp = self.vfs('localtags', 'a')
570 fp = self.vfs('localtags', 'a')
556 else:
571 else:
557 prevtags = fp.read()
572 prevtags = fp.read()
558
573
559 # local tags are stored in the current charset
574 # local tags are stored in the current charset
560 writetags(fp, names, None, prevtags)
575 writetags(fp, names, None, prevtags)
561 for name in names:
576 for name in names:
562 self.hook('tag', node=hex(node), tag=name, local=local)
577 self.hook('tag', node=hex(node), tag=name, local=local)
563 return
578 return
564
579
565 try:
580 try:
566 fp = self.wfile('.hgtags', 'rb+')
581 fp = self.wfile('.hgtags', 'rb+')
567 except IOError, e:
582 except IOError, e:
568 if e.errno != errno.ENOENT:
583 if e.errno != errno.ENOENT:
569 raise
584 raise
570 fp = self.wfile('.hgtags', 'ab')
585 fp = self.wfile('.hgtags', 'ab')
571 else:
586 else:
572 prevtags = fp.read()
587 prevtags = fp.read()
573
588
574 # committed tags are stored in UTF-8
589 # committed tags are stored in UTF-8
575 writetags(fp, names, encoding.fromlocal, prevtags)
590 writetags(fp, names, encoding.fromlocal, prevtags)
576
591
577 fp.close()
592 fp.close()
578
593
579 self.invalidatecaches()
594 self.invalidatecaches()
580
595
581 if '.hgtags' not in self.dirstate:
596 if '.hgtags' not in self.dirstate:
582 self[None].add(['.hgtags'])
597 self[None].add(['.hgtags'])
583
598
584 m = matchmod.exact(self.root, '', ['.hgtags'])
599 m = matchmod.exact(self.root, '', ['.hgtags'])
585 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
586 editor=editor)
601 editor=editor)
587
602
588 for name in names:
603 for name in names:
589 self.hook('tag', node=hex(node), tag=name, local=local)
604 self.hook('tag', node=hex(node), tag=name, local=local)
590
605
591 return tagnode
606 return tagnode
592
607
593 def tag(self, names, node, message, local, user, date, editor=False):
608 def tag(self, names, node, message, local, user, date, editor=False):
594 '''tag a revision with one or more symbolic names.
609 '''tag a revision with one or more symbolic names.
595
610
596 names is a list of strings or, when adding a single tag, names may be a
611 names is a list of strings or, when adding a single tag, names may be a
597 string.
612 string.
598
613
599 if local is True, the tags are stored in a per-repository file.
614 if local is True, the tags are stored in a per-repository file.
600 otherwise, they are stored in the .hgtags file, and a new
615 otherwise, they are stored in the .hgtags file, and a new
601 changeset is committed with the change.
616 changeset is committed with the change.
602
617
603 keyword arguments:
618 keyword arguments:
604
619
605 local: whether to store tags in non-version-controlled file
620 local: whether to store tags in non-version-controlled file
606 (default False)
621 (default False)
607
622
608 message: commit message to use if committing
623 message: commit message to use if committing
609
624
610 user: name of user to use if committing
625 user: name of user to use if committing
611
626
612 date: date tuple to use if committing'''
627 date: date tuple to use if committing'''
613
628
614 if not local:
629 if not local:
615 m = matchmod.exact(self.root, '', ['.hgtags'])
630 m = matchmod.exact(self.root, '', ['.hgtags'])
616 if util.any(self.status(match=m, unknown=True, ignored=True)):
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
617 raise util.Abort(_('working copy of .hgtags is changed'),
632 raise util.Abort(_('working copy of .hgtags is changed'),
618 hint=_('please commit .hgtags manually'))
633 hint=_('please commit .hgtags manually'))
619
634
620 self.tags() # instantiate the cache
635 self.tags() # instantiate the cache
621 self._tag(names, node, message, local, user, date, editor=editor)
636 self._tag(names, node, message, local, user, date, editor=editor)
622
637
623 @filteredpropertycache
638 @filteredpropertycache
624 def _tagscache(self):
639 def _tagscache(self):
625 '''Returns a tagscache object that contains various tags related
640 '''Returns a tagscache object that contains various tags related
626 caches.'''
641 caches.'''
627
642
628 # This simplifies its cache management by having one decorated
643 # This simplifies its cache management by having one decorated
629 # function (this one) and the rest simply fetch things from it.
644 # function (this one) and the rest simply fetch things from it.
630 class tagscache(object):
645 class tagscache(object):
631 def __init__(self):
646 def __init__(self):
632 # These two define the set of tags for this repository. tags
647 # These two define the set of tags for this repository. tags
633 # maps tag name to node; tagtypes maps tag name to 'global' or
648 # maps tag name to node; tagtypes maps tag name to 'global' or
634 # 'local'. (Global tags are defined by .hgtags across all
649 # 'local'. (Global tags are defined by .hgtags across all
635 # heads, and local tags are defined in .hg/localtags.)
650 # heads, and local tags are defined in .hg/localtags.)
636 # They constitute the in-memory cache of tags.
651 # They constitute the in-memory cache of tags.
637 self.tags = self.tagtypes = None
652 self.tags = self.tagtypes = None
638
653
639 self.nodetagscache = self.tagslist = None
654 self.nodetagscache = self.tagslist = None
640
655
641 cache = tagscache()
656 cache = tagscache()
642 cache.tags, cache.tagtypes = self._findtags()
657 cache.tags, cache.tagtypes = self._findtags()
643
658
644 return cache
659 return cache
645
660
646 def tags(self):
661 def tags(self):
647 '''return a mapping of tag to node'''
662 '''return a mapping of tag to node'''
648 t = {}
663 t = {}
649 if self.changelog.filteredrevs:
664 if self.changelog.filteredrevs:
650 tags, tt = self._findtags()
665 tags, tt = self._findtags()
651 else:
666 else:
652 tags = self._tagscache.tags
667 tags = self._tagscache.tags
653 for k, v in tags.iteritems():
668 for k, v in tags.iteritems():
654 try:
669 try:
655 # ignore tags to unknown nodes
670 # ignore tags to unknown nodes
656 self.changelog.rev(v)
671 self.changelog.rev(v)
657 t[k] = v
672 t[k] = v
658 except (error.LookupError, ValueError):
673 except (error.LookupError, ValueError):
659 pass
674 pass
660 return t
675 return t
661
676
662 def _findtags(self):
677 def _findtags(self):
663 '''Do the hard work of finding tags. Return a pair of dicts
678 '''Do the hard work of finding tags. Return a pair of dicts
664 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
665 maps tag name to a string like \'global\' or \'local\'.
680 maps tag name to a string like \'global\' or \'local\'.
666 Subclasses or extensions are free to add their own tags, but
681 Subclasses or extensions are free to add their own tags, but
667 should be aware that the returned dicts will be retained for the
682 should be aware that the returned dicts will be retained for the
668 duration of the localrepo object.'''
683 duration of the localrepo object.'''
669
684
670 # XXX what tagtype should subclasses/extensions use? Currently
685 # XXX what tagtype should subclasses/extensions use? Currently
671 # mq and bookmarks add tags, but do not set the tagtype at all.
686 # mq and bookmarks add tags, but do not set the tagtype at all.
672 # Should each extension invent its own tag type? Should there
687 # Should each extension invent its own tag type? Should there
673 # be one tagtype for all such "virtual" tags? Or is the status
688 # be one tagtype for all such "virtual" tags? Or is the status
674 # quo fine?
689 # quo fine?
675
690
676 alltags = {} # map tag name to (node, hist)
691 alltags = {} # map tag name to (node, hist)
677 tagtypes = {}
692 tagtypes = {}
678
693
679 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
680 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
681
696
682 # Build the return dicts. Have to re-encode tag names because
697 # Build the return dicts. Have to re-encode tag names because
683 # the tags module always uses UTF-8 (in order not to lose info
698 # the tags module always uses UTF-8 (in order not to lose info
684 # writing to the cache), but the rest of Mercurial wants them in
699 # writing to the cache), but the rest of Mercurial wants them in
685 # local encoding.
700 # local encoding.
686 tags = {}
701 tags = {}
687 for (name, (node, hist)) in alltags.iteritems():
702 for (name, (node, hist)) in alltags.iteritems():
688 if node != nullid:
703 if node != nullid:
689 tags[encoding.tolocal(name)] = node
704 tags[encoding.tolocal(name)] = node
690 tags['tip'] = self.changelog.tip()
705 tags['tip'] = self.changelog.tip()
691 tagtypes = dict([(encoding.tolocal(name), value)
706 tagtypes = dict([(encoding.tolocal(name), value)
692 for (name, value) in tagtypes.iteritems()])
707 for (name, value) in tagtypes.iteritems()])
693 return (tags, tagtypes)
708 return (tags, tagtypes)
694
709
695 def tagtype(self, tagname):
710 def tagtype(self, tagname):
696 '''
711 '''
697 return the type of the given tag. result can be:
712 return the type of the given tag. result can be:
698
713
699 'local' : a local tag
714 'local' : a local tag
700 'global' : a global tag
715 'global' : a global tag
701 None : tag does not exist
716 None : tag does not exist
702 '''
717 '''
703
718
704 return self._tagscache.tagtypes.get(tagname)
719 return self._tagscache.tagtypes.get(tagname)
705
720
706 def tagslist(self):
721 def tagslist(self):
707 '''return a list of tags ordered by revision'''
722 '''return a list of tags ordered by revision'''
708 if not self._tagscache.tagslist:
723 if not self._tagscache.tagslist:
709 l = []
724 l = []
710 for t, n in self.tags().iteritems():
725 for t, n in self.tags().iteritems():
711 l.append((self.changelog.rev(n), t, n))
726 l.append((self.changelog.rev(n), t, n))
712 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
713
728
714 return self._tagscache.tagslist
729 return self._tagscache.tagslist
715
730
716 def nodetags(self, node):
731 def nodetags(self, node):
717 '''return the tags associated with a node'''
732 '''return the tags associated with a node'''
718 if not self._tagscache.nodetagscache:
733 if not self._tagscache.nodetagscache:
719 nodetagscache = {}
734 nodetagscache = {}
720 for t, n in self._tagscache.tags.iteritems():
735 for t, n in self._tagscache.tags.iteritems():
721 nodetagscache.setdefault(n, []).append(t)
736 nodetagscache.setdefault(n, []).append(t)
722 for tags in nodetagscache.itervalues():
737 for tags in nodetagscache.itervalues():
723 tags.sort()
738 tags.sort()
724 self._tagscache.nodetagscache = nodetagscache
739 self._tagscache.nodetagscache = nodetagscache
725 return self._tagscache.nodetagscache.get(node, [])
740 return self._tagscache.nodetagscache.get(node, [])
726
741
727 def nodebookmarks(self, node):
742 def nodebookmarks(self, node):
728 marks = []
743 marks = []
729 for bookmark, n in self._bookmarks.iteritems():
744 for bookmark, n in self._bookmarks.iteritems():
730 if n == node:
745 if n == node:
731 marks.append(bookmark)
746 marks.append(bookmark)
732 return sorted(marks)
747 return sorted(marks)
733
748
734 def branchmap(self):
749 def branchmap(self):
735 '''returns a dictionary {branch: [branchheads]} with branchheads
750 '''returns a dictionary {branch: [branchheads]} with branchheads
736 ordered by increasing revision number'''
751 ordered by increasing revision number'''
737 branchmap.updatecache(self)
752 branchmap.updatecache(self)
738 return self._branchcaches[self.filtername]
753 return self._branchcaches[self.filtername]
739
754
740 @unfilteredmethod
755 @unfilteredmethod
741 def revbranchcache(self):
756 def revbranchcache(self):
742 if not self._revbranchcache:
757 if not self._revbranchcache:
743 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
744 return self._revbranchcache
759 return self._revbranchcache
745
760
746 def branchtip(self, branch, ignoremissing=False):
761 def branchtip(self, branch, ignoremissing=False):
747 '''return the tip node for a given branch
762 '''return the tip node for a given branch
748
763
749 If ignoremissing is True, then this method will not raise an error.
764 If ignoremissing is True, then this method will not raise an error.
750 This is helpful for callers that only expect None for a missing branch
765 This is helpful for callers that only expect None for a missing branch
751 (e.g. namespace).
766 (e.g. namespace).
752
767
753 '''
768 '''
754 try:
769 try:
755 return self.branchmap().branchtip(branch)
770 return self.branchmap().branchtip(branch)
756 except KeyError:
771 except KeyError:
757 if not ignoremissing:
772 if not ignoremissing:
758 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
759 else:
774 else:
760 pass
775 pass
761
776
762 def lookup(self, key):
777 def lookup(self, key):
763 return self[key].node()
778 return self[key].node()
764
779
765 def lookupbranch(self, key, remote=None):
780 def lookupbranch(self, key, remote=None):
766 repo = remote or self
781 repo = remote or self
767 if key in repo.branchmap():
782 if key in repo.branchmap():
768 return key
783 return key
769
784
770 repo = (remote and remote.local()) and remote or self
785 repo = (remote and remote.local()) and remote or self
771 return repo[key].branch()
786 return repo[key].branch()
772
787
773 def known(self, nodes):
788 def known(self, nodes):
774 nm = self.changelog.nodemap
789 nm = self.changelog.nodemap
775 pc = self._phasecache
790 pc = self._phasecache
776 result = []
791 result = []
777 for n in nodes:
792 for n in nodes:
778 r = nm.get(n)
793 r = nm.get(n)
779 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
780 result.append(resp)
795 result.append(resp)
781 return result
796 return result
782
797
783 def local(self):
798 def local(self):
784 return self
799 return self
785
800
786 def cancopy(self):
801 def cancopy(self):
787 # so statichttprepo's override of local() works
802 # so statichttprepo's override of local() works
788 if not self.local():
803 if not self.local():
789 return False
804 return False
790 if not self.ui.configbool('phases', 'publish', True):
805 if not self.ui.configbool('phases', 'publish', True):
791 return True
806 return True
792 # if publishing we can't copy if there is filtered content
807 # if publishing we can't copy if there is filtered content
793 return not self.filtered('visible').changelog.filteredrevs
808 return not self.filtered('visible').changelog.filteredrevs
794
809
795 def shared(self):
810 def shared(self):
796 '''the type of shared repository (None if not shared)'''
811 '''the type of shared repository (None if not shared)'''
797 if self.sharedpath != self.path:
812 if self.sharedpath != self.path:
798 return 'store'
813 return 'store'
799 return None
814 return None
800
815
801 def join(self, f, *insidef):
816 def join(self, f, *insidef):
802 return self.vfs.join(os.path.join(f, *insidef))
817 return self.vfs.join(os.path.join(f, *insidef))
803
818
804 def wjoin(self, f, *insidef):
819 def wjoin(self, f, *insidef):
805 return self.vfs.reljoin(self.root, f, *insidef)
820 return self.vfs.reljoin(self.root, f, *insidef)
806
821
807 def file(self, f):
822 def file(self, f):
808 if f[0] == '/':
823 if f[0] == '/':
809 f = f[1:]
824 f = f[1:]
810 return filelog.filelog(self.svfs, f)
825 return filelog.filelog(self.svfs, f)
811
826
812 def changectx(self, changeid):
827 def changectx(self, changeid):
813 return self[changeid]
828 return self[changeid]
814
829
815 def parents(self, changeid=None):
830 def parents(self, changeid=None):
816 '''get list of changectxs for parents of changeid'''
831 '''get list of changectxs for parents of changeid'''
817 return self[changeid].parents()
832 return self[changeid].parents()
818
833
819 def setparents(self, p1, p2=nullid):
834 def setparents(self, p1, p2=nullid):
820 self.dirstate.beginparentchange()
835 self.dirstate.beginparentchange()
821 copies = self.dirstate.setparents(p1, p2)
836 copies = self.dirstate.setparents(p1, p2)
822 pctx = self[p1]
837 pctx = self[p1]
823 if copies:
838 if copies:
824 # Adjust copy records, the dirstate cannot do it, it
839 # Adjust copy records, the dirstate cannot do it, it
825 # requires access to parents manifests. Preserve them
840 # requires access to parents manifests. Preserve them
826 # only for entries added to first parent.
841 # only for entries added to first parent.
827 for f in copies:
842 for f in copies:
828 if f not in pctx and copies[f] in pctx:
843 if f not in pctx and copies[f] in pctx:
829 self.dirstate.copy(copies[f], f)
844 self.dirstate.copy(copies[f], f)
830 if p2 == nullid:
845 if p2 == nullid:
831 for f, s in sorted(self.dirstate.copies().items()):
846 for f, s in sorted(self.dirstate.copies().items()):
832 if f not in pctx and s not in pctx:
847 if f not in pctx and s not in pctx:
833 self.dirstate.copy(None, f)
848 self.dirstate.copy(None, f)
834 self.dirstate.endparentchange()
849 self.dirstate.endparentchange()
835
850
836 def filectx(self, path, changeid=None, fileid=None):
851 def filectx(self, path, changeid=None, fileid=None):
837 """changeid can be a changeset revision, node, or tag.
852 """changeid can be a changeset revision, node, or tag.
838 fileid can be a file revision or node."""
853 fileid can be a file revision or node."""
839 return context.filectx(self, path, changeid, fileid)
854 return context.filectx(self, path, changeid, fileid)
840
855
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

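    # A minimal sketch of how an extension might register a data filter; the
    # name and function below are hypothetical. A registered filter is picked
    # up by _loadfilter() when an [encode]/[decode] command starts with its
    # name, and is called as fn(data, params, ui=, repo=, filename=).
    #
    #   def upperfilter(s, params, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper:', upperfilter)
    #   # an hgrc rule such as "[encode] **.txt = upper:" then selects it
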
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

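    # A minimal sketch pairing wread()/wwrite(): both run the configured
    # filters, so data round-trips between working-directory form and
    # repository form. Paths here are hypothetical.
    #
    #   data = repo.wread('a.txt')             # runs the [encode] filters
    #   repo.wwrite('b.txt', data, flags='')   # runs the [decode] filters
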
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
        tr.hookargs['TXNID'] = trid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr

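    # A minimal usage sketch of the transaction API, mirroring how
    # commitctx() below drives it ('my-change' is a hypothetical description):
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-change')
    #       try:
    #           # ... write to the store; nested callers reuse the same
    #           # transaction via currenttransaction()/nest() ...
    #           tr.close()      # success: run finalizers and txnclose hooks
    #       finally:
    #           tr.release()    # aborts (txnabort hooks) if close() not run
    #   finally:
    #       lock.release()
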
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

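    # A minimal sketch of the two recovery entry points: recover() replays an
    # interrupted journal (what 'hg recover' runs), while rollback() undoes
    # the last transaction recorded in the undo files (what 'hg rollback'
    # runs).
    #
    #   repo.recover()              # returns True if a journal existed
    #   repo.rollback(dryrun=True)  # preview; returns 0 on success, 1 when
    #                               # no rollback information is available
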
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

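    # A minimal sketch of the documented lock ordering: when both locks are
    # needed, take wlock before lock and release in reverse order, just as
    # rollback() above does via release(lock, wlock).
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify the working copy and the store ...
    #   finally:
    #       release(lock, wlock)
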
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

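    # A minimal usage sketch of commit(); the message and user below are
    # hypothetical. It returns the new changeset node, or None when there is
    # nothing to commit on an unchanged branch.
    #
    #   node = repo.commit(text='fix a bug', user='alice <a@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
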
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

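    # A minimal sketch of walk() with a matcher, assuming matchmod as
    # imported above; the pattern is hypothetical.
    #
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m):    # working directory when node is None
    #       repo.ui.write(f + '\n')
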
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

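    # A minimal sketch of status(): the defaults compare the working
    # directory against '.', and the returned status object carries the
    # modified/added/removed/deleted/unknown/ignored/clean lists used by
    # commit() above.
    #
    #   st = repo.status(ignored=True, clean=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
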
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

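    # A minimal sketch of branchheads(); 'default' is the conventional branch
    # name, and heads come back newest first. short() is imported from node
    # at the top of this module.
    #
    #   for h in repo.branchheads('default', closed=False):
    #       repo.ui.write('%s\n' % short(h))
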
1687 def branches(self, nodes):
1702 def branches(self, nodes):
1688 if not nodes:
1703 if not nodes:
1689 nodes = [self.changelog.tip()]
1704 nodes = [self.changelog.tip()]
1690 b = []
1705 b = []
1691 for n in nodes:
1706 for n in nodes:
1692 t = n
1707 t = n
1693 while True:
1708 while True:
1694 p = self.changelog.parents(n)
1709 p = self.changelog.parents(n)
1695 if p[1] != nullid or p[0] == nullid:
1710 if p[1] != nullid or p[0] == nullid:
1696 b.append((t, n, p[0], p[1]))
1711 b.append((t, n, p[0], p[1]))
1697 break
1712 break
1698 n = p[0]
1713 n = p[0]
1699 return b
1714 return b
1700
1715
1701 def between(self, pairs):
1716 def between(self, pairs):
1702 r = []
1717 r = []
1703
1718
1704 for top, bottom in pairs:
1719 for top, bottom in pairs:
1705 n, l, i = top, [], 0
1720 n, l, i = top, [], 0
1706 f = 1
1721 f = 1
1707
1722
1708 while n != bottom and n != nullid:
1723 while n != bottom and n != nullid:
1709 p = self.changelog.parents(n)[0]
1724 p = self.changelog.parents(n)[0]
1710 if i == f:
1725 if i == f:
1711 l.append(n)
1726 l.append(n)
1712 f = f * 2
1727 f = f * 2
1713 n = p
1728 n = p
1714 i += 1
1729 i += 1
1715
1730
1716 r.append(l)
1731 r.append(l)
1717
1732
1718 return r
1733 return r
1719
1734
1720 def checkpush(self, pushop):
1735 def checkpush(self, pushop):
1721 """Extensions can override this function if additional checks have
1736 """Extensions can override this function if additional checks have
1722 to be performed before pushing, or call it if they override push
1737 to be performed before pushing, or call it if they override push
1723 command.
1738 command.
1724 """
1739 """
1725 pass
1740 pass
1726
1741
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

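    # Sketch of how an extension would typically register one of these
    # functions ('myext' and the check itself are illustrative only):
    #
    #   def reposetup(ui, repo):
    #       def checkoutgoing(repo, remote, outgoing):
    #           ui.note('pushing %d changesets\n' % len(outgoing.missing))
    #       repo.prepushoutgoinghooks.add('myext', checkoutgoing)
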
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

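    # For reference, the stream_in() wire format as parsed above -- a sketch
    # reconstructed from the parsing code, not from a protocol spec:
    #
    #   <resp>\n                        "0" ok, "1" forbidden, "2" remote lock failed
    #   <total_files> <total_bytes>\n   e.g. "731 43989553"
    #   ...then, repeated <total_files> times:
    #   <store name>\0<size>\n          followed by exactly <size> bytes of file data
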
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

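    # Sketch of a pushkey round-trip from calling code (the namespace, key
    # and values below are purely illustrative):
    #
    #   old = repo.listkeys('bookmarks').get('mybook', '')
    #   ok = repo.pushkey('bookmarks', 'mybook', old, newnodehex)
    #   # ok is truthy on success, False when a prepushkey hook aborted
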
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,598 +1,602 b''
Test exchange of common information using bundle2


  $ getmainid() {
  >    hg -R main log --template '{node}\n' --rev "$1"
  > }

enable obsolescence

  $ cat > $TESTTMP/bundle2-pushkey-hook.sh << EOF
  > echo pushkey: lock state after \"\$HG_NAMESPACE\"
  > hg debuglock
  > EOF

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution=createmarkers,exchange
  > bundle2-exp=True
  > [ui]
  > ssh=python "$TESTDIR/dummyssh"
  > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
  > [web]
  > push_ssl = false
  > allow_push = *
  > [phases]
  > publish=False
  > [hooks]
  > pretxnclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
  > txnclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
  > txnclose.env = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" txnclose"
  > pushkey= sh "$TESTTMP/bundle2-pushkey-hook.sh"
  > EOF

The extension requires a repo (currently unused)

  $ hg init main
  $ cd main
  $ touch a
  $ hg add a
  $ hg commit -m 'a'
  pre-close-tip:3903775176ed draft
  postclose-tip:3903775176ed draft
  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)

  $ hg unbundle $TESTDIR/bundles/rebase.hg
  adding changesets
  adding manifests
  adding file changes
  added 8 changesets with 7 changes to 7 files (+3 heads)
  pre-close-tip:02de42196ebe draft
  postclose-tip:02de42196ebe draft
  txnclose hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:* HG_TXNNAME=unbundle (glob)
  bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
  (run 'hg heads' to see heads, 'hg merge' to merge)

  $ cd ..

Real world exchange
=====================

Add more obsolescence information

  $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
  pre-close-tip:02de42196ebe draft
  postclose-tip:02de42196ebe draft
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
  $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
  pre-close-tip:02de42196ebe draft
  postclose-tip:02de42196ebe draft
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)

clone --pull

  $ hg -R main phase --public cd010b8cd998
  pre-close-tip:000000000000 public
  postclose-tip:02de42196ebe draft
  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
  $ hg clone main other --pull --rev 9520eea781bc
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  1 new obsolescence markers
  pre-close-tip:9520eea781bc draft
  postclose-tip:9520eea781bc draft
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
  file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R other log -G
  @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

pull

  $ hg -R main phase --public 9520eea781bc
  pre-close-tip:000000000000 public
  postclose-tip:02de42196ebe draft
  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
  $ hg -R other pull -r 24b6387c8c8c
  pulling from $TESTTMP/main (glob)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  1 new obsolescence markers
  pre-close-tip:24b6387c8c8c draft
  postclose-tip:24b6387c8c8c draft
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
  file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
  (run 'hg heads' to see heads, 'hg merge' to merge)
  $ hg -R other log -G
  o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
  |
  | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

pull empty (with phase movement)

  $ hg -R main phase --public 24b6387c8c8c
  pre-close-tip:000000000000 public
  postclose-tip:02de42196ebe draft
  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
  $ hg -R other pull -r 24b6387c8c8c
  pulling from $TESTTMP/main (glob)
  no changes found
  pre-close-tip:000000000000 public
  postclose-tip:24b6387c8c8c public
  txnclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
  file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
  $ hg -R other log -G
  o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
  |
  | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

pull empty

  $ hg -R other pull -r 24b6387c8c8c
  pulling from $TESTTMP/main (glob)
  no changes found
  pre-close-tip:24b6387c8c8c public
  postclose-tip:24b6387c8c8c public
  txnclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
  file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
  $ hg -R other log -G
  o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
  |
  | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

add extra data to test their exchange during push

  $ hg -R main bookmark --rev eea13746799a book_eea1
  $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
  pre-close-tip:02de42196ebe draft
  postclose-tip:02de42196ebe draft
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
  $ hg -R main bookmark --rev 02de42196ebe book_02de
  $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
  $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
  $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
  $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
  $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
  $ hg -R main bookmark --rev 32af7686d403 book_32af
  $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)

  $ hg -R other bookmark --rev cd010b8cd998 book_eea1
  $ hg -R other bookmark --rev cd010b8cd998 book_02de
  $ hg -R other bookmark --rev cd010b8cd998 book_42cc
  $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
  $ hg -R other bookmark --rev cd010b8cd998 book_32af

  $ hg -R main phase --public eea13746799a
  pre-close-tip:000000000000 public
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)

push
  $ hg -R main push other --rev eea13746799a --bookmark book_eea1
  pushing to other
  searching for changes
  pre-close-tip:eea13746799a public book_eea1
  pushkey: lock state after "phases"
  lock: free
  wlock: free
  pushkey: lock state after "bookmarks"
  lock: free
  wlock: free
  postclose-tip:eea13746799a public book_eea1
  txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=push (glob)
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 0 changes to 0 files (-1 heads)
  remote: 1 new obsolescence markers
  updating bookmark book_eea1
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
  file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
  $ hg -R other log -G
  o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
  |\
  | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
  | |
  @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

pull over ssh

  $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
  pulling from ssh://user@dummy/main
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  1 new obsolescence markers
  updating bookmark book_02de
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
  ssh://user@dummy/main HG_URL=ssh://user@dummy/main
  (run 'hg heads' to see heads, 'hg merge' to merge)
  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

pull over http

  $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
  $ cat main.pid >> $DAEMON_PIDS

  $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
  pulling from http://localhost:$HGPORT/
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  1 new obsolescence markers
  updating bookmark book_42cc
  pre-close-tip:42ccdea3bb16 draft book_42cc
  postclose-tip:42ccdea3bb16 draft book_42cc
  txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
  http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
  (run 'hg heads .' to see heads, 'hg merge' to merge)
  $ cat main-error.log
  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

push over ssh

  $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: 1 new obsolescence markers
  updating bookmark book_5fdd
  remote: pre-close-tip:5fddd98957c8 draft book_5fdd
  remote: pushkey: lock state after "bookmarks"
  remote: lock: free
  remote: wlock: free
  remote: postclose-tip:5fddd98957c8 draft book_5fdd
  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:127.0.0.1 (glob)
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
  ssh://user@dummy/other HG_URL=ssh://user@dummy/other
  $ hg -R other log -G
  o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
  |
  o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
  |
  | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
  | |
  | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
  | |/|
  | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
  |/ /
  | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

push over http

  $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main phase --public 32af7686d403
  pre-close-tip:000000000000 public
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
  $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: 1 new obsolescence markers
  updating bookmark book_32af
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
  http://localhost:$HGPORT2/ HG_URL=http://localhost:$HGPORT2/
  $ cat other-error.log

Check final content.

  $ hg -R other log -G
  o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
  |
  o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
  |
  o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
  |
  | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
  | |
  | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
  | |/|
  | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
  |/ /
  | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

(check that no 'pending' files remain)

  $ ls -1 other/.hg/bookmarks*
  other/.hg/bookmarks
  $ ls -1 other/.hg/store/phaseroots*
  other/.hg/store/phaseroots
  $ ls -1 other/.hg/store/00changelog.i*
  other/.hg/store/00changelog.i

Error Handling
==============

Check that errors are properly returned to the client during push.

Setting up

  $ cat > failpush.py << EOF
  > """A small extension that makes push fail when using bundle2
  >
  > used to test error handling in bundle2
  > """
  >
  > from mercurial import util
  > from mercurial import bundle2
  > from mercurial import exchange
  > from mercurial import extensions
  >
  > def _pushbundle2failpart(pushop, bundler):
  >     reason = pushop.ui.config('failpush', 'reason', None)
  >     part = None
  >     if reason == 'abort':
  >         bundler.newpart('test:abort')
  >     if reason == 'unknown':
  >         bundler.newpart('test:unknown')
  >     if reason == 'race':
  >         # 20 Bytes of crap
  >         bundler.newpart('check:heads', data='01234567890123456789')
  >
  > @bundle2.parthandler("test:abort")
  > def handleabort(op, part):
  >     raise util.Abort('Abandon ship!', hint="don't panic")
  >
  > def uisetup(ui):
  >     exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
  >     exchange.b2partsgenorder.insert(0, 'failpart')
  >
  > EOF

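(For reference: 'abort' exercises a part handler that raises on the server,
'unknown' exercises a mandatory part name the server has no handler for, and
'race' sends a 'check:heads' part filled with 20 bytes of garbage so the
heads check can never match, simulating a repository that changed mid-push.)
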
441 $ cd main
441 $ cd main
442 $ hg up tip
442 $ hg up tip
443 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
443 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
444 $ echo 'I' > I
444 $ echo 'I' > I
445 $ hg add I
445 $ hg add I
446 $ hg ci -m 'I'
446 $ hg ci -m 'I'
447 pre-close-tip:e7ec4e813ba6 draft
447 pre-close-tip:e7ec4e813ba6 draft
448 postclose-tip:e7ec4e813ba6 draft
448 postclose-tip:e7ec4e813ba6 draft
449 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
449 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
450 $ hg id
450 $ hg id
451 e7ec4e813ba6 tip
451 e7ec4e813ba6 tip
452 $ cd ..
452 $ cd ..
453
453
454 $ cat << EOF >> $HGRCPATH
454 $ cat << EOF >> $HGRCPATH
455 > [extensions]
455 > [extensions]
456 > failpush=$TESTTMP/failpush.py
456 > failpush=$TESTTMP/failpush.py
457 > EOF
457 > EOF
458
458
459 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
459 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
460 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
460 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
461 $ cat other.pid >> $DAEMON_PIDS
461 $ cat other.pid >> $DAEMON_PIDS
462
462
Doing the actual push: Abort error

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = abort
  > EOF

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: Abandon ship!
  (don't panic)
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: Abandon ship!
  (don't panic)
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: Abandon ship!
  (don't panic)
  [255]
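
All three transports report the same message because bundle2 ships the
server-side exception back to the client as a part of its own: when a
handler raises util.Abort during unbundling, the server puts an
'error:abort' part in its reply bundle and the client re-raises it
locally. A rough sketch of the client-side handler, close to the one
mercurial.bundle2 ships at this point (treat the exact names as
illustrative):

    from mercurial import util
    from mercurial import bundle2

    @bundle2.parthandler('error:abort', ('message', 'hint'))
    def handleerrorabort(op, part):
        # turn the remote abort back into a local one, hint included
        raise util.Abort(part.params['message'],
                         hint=part.params.get('hint'))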


Doing the actual push: unknown mandatory parts

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = unknown
  > EOF

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: missing support for test:unknown
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: missing support for test:unknown
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: missing support for test:unknown
  [255]
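
The abort comes from the receiving side: 'test:unknown' has a generator
but no registered handler, and bundle2 parts are mandatory by default,
so a receiver that cannot interpret one must refuse the whole bundle.
An *advisory* part would simply be skipped instead. A hedged sketch of
the difference, assuming this version's part objects expose the
'mandatory' flag (the generator name is invented):

    def _pushbundle2advisory(pushop, bundler):
        part = bundler.newpart('test:unknown')
        # advisory parts may be ignored by receivers that do not know
        # the part type; mandatory parts (the default) may not
        part.mandatory = False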

Doing the actual push: race

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = race
  > EOF

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: push failed:
  'repository changed while pushing - please try again'
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: push failed:
  'repository changed while pushing - please try again'
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: push failed:
  'repository changed while pushing - please try again'
  [255]
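
The 'check:heads' part carries the heads the client saw when it started
the push, as raw 20-byte binary nodes; the 20 bytes of junk above can
never match, so the server concludes the repository changed under the
push. A rough sketch of the server-side check, close to the built-in
handler in mercurial.bundle2 (names illustrative):

    from mercurial import bundle2, error

    @bundle2.parthandler('check:heads')
    def checkheads(op, part):
        # read the client's idea of our heads, 20 bytes per node
        h = part.read(20)
        heads = []
        while len(h) == 20:
            heads.append(h)
            h = part.read(20)
        if heads != op.repo.heads():
            raise error.PushRaced('repository changed while pushing - '
                                  'please try again')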

Doing the actual push: hook abort

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason =
  > [hooks]
  > pretxnclose.failpush = false
  > EOF

  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
  $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  pre-close-tip:e7ec4e813ba6 draft
  transaction abort!
  rollback completed
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  abort: pretxnclose.failpush hook exited with status 1
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  abort: pretxnclose.failpush hook exited with status 1
  remote: pre-close-tip:e7ec4e813ba6 draft
  remote: transaction abort!
  remote: rollback completed
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  abort: pretxnclose.failpush hook exited with status 1
  [255]
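
This is the case issue4594 is about: the hook output and the rollback
messages are produced on the server, so the client can only show them
if the server captures its own ui output and sends it back. bundle2
does this with an 'output' part in the reply bundle; the client replays
every captured line with a "remote: " prefix, which is exactly the
output above. A rough sketch of the client-side handler, close to the
one in mercurial.bundle2 (names illustrative):

    from mercurial import bundle2

    @bundle2.parthandler('output')
    def handleoutput(op, part):
        # replay output captured on the server on the local ui
        for line in part.read().splitlines():
            op.ui.status('remote: %s\n' % line)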

(check that no 'pending' files remain)

  $ ls -1 other/.hg/bookmarks*
  other/.hg/bookmarks
  $ ls -1 other/.hg/store/phaseroots*
  other/.hg/store/phaseroots
  $ ls -1 other/.hg/store/00changelog.i*
  other/.hg/store/00changelog.i
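
While the transaction is still open, pretxnclose hooks must already see
the incoming changes, so Mercurial exposes them through temporary
'pending' files (bookmarks.pending, store/phaseroots.pending,
store/00changelog.i.a); they are removed when the transaction commits
or rolls back, and the ls checks above verify none were left behind.
An in-process Python hook observes the pending state directly on the
live repo object. A minimal hypothetical example (file and function
names are invented), wired up with
'pretxnclose.show = python:$TESTTMP/pendinghook.py:showpendingtip':

    # pendinghook.py
    def showpendingtip(ui, repo, **kwargs):
        # inside pretxnclose the not-yet-committed tip is visible
        ui.write('pending tip: %s\n' % repo['tip'].hex())
        return False  # a truthy return would abort the transaction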