bundlerepo: fix raw handling in revision()...
Jun Wu, changeset r31836:4598e8f4 (default branch)
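Summary of the change: bundlerevlog.revision() previously applied bundle deltas to, and cached, the flag-processed text, which breaks when a flag processor transforms the stored payload (for example the base64 and gzip encodings in the flag-processor test). The patch keeps the raw stored text in a separate `rawtext` variable: deltas are applied to `rawtext`, the revision cache stores `rawtext`, and `_processflags()` derives the `text` that is hash-checked and returned. The first hunk below is mercurial/bundlerepo.py; the second updates the expected output of the flag-processor test (tests/test-flagprocessor.t in the Mercurial tree), whose bundle-repository log now succeeds where it previously died with mpatchError (the re-bundling case still records a known failure). The snippet below is a toy sketch, not Mercurial code, of why the raw and processed forms have to be kept apart; the helper name is invented.

# Toy sketch (not Mercurial code; names invented): a store whose raw,
# on-disk form is base64 while readers expect the decoded text. Deltas
# and the revision cache must operate on the raw form; only the value
# handed back to callers goes through the read-side flag processor.
import base64

def processflags_read(rawtext):
    return base64.b64decode(rawtext)      # stand-in for _processflags(..., 'read')

rawtext = base64.b64encode(b"hello")      # what the revlog/bundle actually stores
cache = ('node', 0, rawtext)              # cache raw text; later deltas patch this
text = processflags_read(rawtext)         # what revision() returns to callers
assert text == b"hello"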
@@ -1,557 +1,557 @@
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""

from __future__ import absolute_import

import os
import shutil
import tempfile

from .i18n import _
from .node import nullid

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    node as nodemod,
    pathutil,
    phases,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)

class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, bundle, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = bundle
        n = len(self)
        self.repotiprev = n - 1
        chain = None
        self.bundlerevs = set() # used by 'bundle()' revset expression
        getchunk = lambda: bundle.deltachunk(chain)
        for chunkdata in iter(getchunk, {}):
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']
            flags = chunkdata['flags']

            size = len(delta)
            start = bundle.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            chain = node
            n += 1

    def _chunk(self, rev):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.revision(rev1), self.revision(rev2))

    def revision(self, nodeorrev, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

-        text = None
+        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._cache and self._cache[1] == iterrev:
-                text = self._cache[2]
+                rawtext = self._cache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
-        if text is None:
-            text = self.baserevision(iterrev)
+        if rawtext is None:
+            rawtext = self.baserevision(iterrev)

        while chain:
            delta = self._chunk(chain.pop())
-            text = mdiff.patches(text, [delta])
+            rawtext = mdiff.patches(rawtext, [delta])

-        text, validatehash = self._processflags(text, self.flags(rev),
+        text, validatehash = self._processflags(rawtext, self.flags(rev),
                                                'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)
-        self._cache = (node, rev, text)
+        self._cache = (node, rev, rawtext)
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use bundlerevlog with such class one
        # needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev, raw=True)

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError

class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, bundle):
        changelog.changelog.__init__(self, opener)
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.

        # This bypasses filtering on changelog.node() and rev() because we need
        # revision text of the bundle base even if it is hidden.
        oldfilter = self.filteredrevs
        try:
            self.filteredrevs = ()
            return changelog.changelog.revision(self, nodeorrev, raw=True)
        finally:
            self.filteredrevs = oldfilter

class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
        manifest.manifestrevlog.__init__(self, opener, dir=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == "03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        if node in self.fulltextcache:
            result = '%s' % self.fulltextcache[node]
        else:
            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
        return result

    def dirlog(self, d):
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener, self.bundle, self._linkmapper,
                self._dirlogstarts, dir=d)
        return super(bundlemanifest, self).dirlog(d)

class bundlefilelog(bundlerevlog, filelog.filelog):
    def __init__(self, opener, path, bundle, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev, raw=True)

class bundlepeer(localrepo.localpeer):
    def canpush(self):
        return False

class bundlephasecache(phases.phasecache):
    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

def _getfilestarts(bundle):
    bundlefilespos = {}
    for chunkdata in iter(bundle.filelogheader, {}):
        fname = chunkdata['filename']
        bundlefilespos[fname] = bundle.tell()
        for chunk in iter(lambda: bundle.deltachunk(None), {}):
            pass
    return bundlefilespos

class bundlerepository(localrepo.localrepository):
    def __init__(self, ui, path, bundlename):
        def _writetempbundle(read, suffix, header=''):
            """Write a temporary file to disk

            This is closure because we need to make sure this tracked by
            self.tempfile for cleanup purposes."""
            fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                            suffix=".hg10un")
            self.tempfile = temp

            with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
                fptemp.write(header)
                while True:
                    chunk = read(2**18)
                    if not chunk:
                        break
                    fptemp.write(chunk)

            return self.vfs.open(self.tempfile, mode="rb")
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, path)
        except error.RepoError:
            self._tempparent = tempfile.mkdtemp()
            localrepo.instance(ui, self._tempparent, 1)
            localrepo.localrepository.__init__(self, ui, self._tempparent)
        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        if path:
            self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
        else:
            self._url = 'bundle:' + bundlename

        self.tempfile = None
        f = util.posixfile(bundlename, "rb")
        self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)

        if isinstance(self.bundle, bundle2.unbundle20):
            cgstream = None
            for part in self.bundle.iterparts():
                if part.type == 'changegroup':
                    if cgstream is not None:
                        raise NotImplementedError("can't process "
                                                  "multiple changegroups")
                    cgstream = part
                    version = part.params.get('version', '01')
                    legalcgvers = changegroup.supportedincomingversions(self)
                    if version not in legalcgvers:
                        msg = _('Unsupported changegroup version: %s')
                        raise error.Abort(msg % version)
                    if self.bundle.compressed():
                        cgstream = _writetempbundle(part.read,
                                                    ".cg%sun" % version)

            if cgstream is None:
                raise error.Abort(_('No changegroups found'))
            cgstream.seek(0)

            self.bundle = changegroup.getunbundler(version, cgstream, 'UN')

        elif self.bundle.compressed():
            f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
            self.bundlefile = self.bundle = exchange.readbundle(ui, f,
                                                                bundlename,
                                                                self.vfs)

        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self.bundle.changelogheader()
        c = bundlechangelog(self.svfs, self.bundle)
        self.manstart = self.bundle.tell()
        return c

    def _constructmanifest(self):
        self.bundle.seek(self.manstart)
        # consume the header if it exists
        self.bundle.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        m = bundlemanifest(self.svfs, self.bundle, linkmapper)
        self.filestart = self.bundle.tell()
        return m

    @localrepo.unfilteredpropertycache
    def manstart(self):
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        self.manifestlog
        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        if not self.bundlefilespos:
            self.bundle.seek(self.filestart)
            self.bundlefilespos = _getfilestarts(self.bundle)

        if f in self.bundlefilespos:
            self.bundle.seek(self.bundlefilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
        else:
            return filelog.filelog(self.svfs, f)

    def close(self):
        """Close assigned bundle file immediately."""
        self.bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _("setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)

def instance(ui, path, create):
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot", "")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path
    return bundlerepository(ui, repopath, bundlename)

class bundletransactionmanager(object):
    def transaction(self):
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError

def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from other

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
      changesets; it is a bundlerepo for the obtained bundle when the
      original "other" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
      the changes; it closes both the original "other" and the one returned
      here.
    '''
    tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], other.close

    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = other.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if other repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and other.capable('getbundle')
                      and other.capable('bundle2'))
        if canbundle2:
            kwargs = {}
            kwargs['common'] = common
            kwargs['heads'] = rheads
            kwargs['bundlecaps'] = exchange.caps20to10(repo)
            kwargs['cg'] = True
            b2 = other.getbundle('incoming', **kwargs)
            fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
                                                     bundlename)
        else:
            if other.capable('getbundle'):
                cg = other.getbundle('incoming', common=common, heads=rheads)
            elif onlyheads is None and not other.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads
                cg = other.changegroup(incoming, "incoming")
                rheads = None
            else:
                cg = other.changegroupsubset(incoming, rheads, 'incoming')
            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
                                                      fname)
            # this repo contains local and other now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
        remotephases = other.listkeys('phases')

        pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        other.close()

    return (localrepo, csets, cleanup)
@@ -1,198 +1,220 @@
# Create server
$ hg init server
$ cd server
$ cat >> .hg/hgrc << EOF
> [extensions]
> extension=$TESTDIR/flagprocessorext.py
> EOF
$ cd ../

# Clone server and enable extensions
$ hg clone -q server client
$ cd client
$ cat >> .hg/hgrc << EOF
> [extensions]
> extension=$TESTDIR/flagprocessorext.py
> EOF

# Commit file that will trigger the noop extension
$ echo '[NOOP]' > noop
$ hg commit -Aqm "noop"

# Commit file that will trigger the base64 extension
$ echo '[BASE64]' > base64
$ hg commit -Aqm 'base64'

# Commit file that will trigger the gzip extension
$ echo '[GZIP]' > gzip
$ hg commit -Aqm 'gzip'

# Commit file that will trigger noop and base64
$ echo '[NOOP][BASE64]' > noop-base64
$ hg commit -Aqm 'noop+base64'

# Commit file that will trigger noop and gzip
$ echo '[NOOP][GZIP]' > noop-gzip
$ hg commit -Aqm 'noop+gzip'

# Commit file that will trigger base64 and gzip
$ echo '[BASE64][GZIP]' > base64-gzip
$ hg commit -Aqm 'base64+gzip'

# Commit file that will trigger base64, gzip and noop
$ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
$ hg commit -Aqm 'base64+gzip+noop'

# TEST: ensure the revision data is consistent
$ hg cat noop
[NOOP]
$ hg debugdata noop 0
[NOOP]

$ hg cat -r . base64
[BASE64]
$ hg debugdata base64 0
W0JBU0U2NF0K (no-eol)

$ hg cat -r . gzip
[GZIP]
$ hg debugdata gzip 0
x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)

$ hg cat -r . noop-base64
[NOOP][BASE64]
$ hg debugdata noop-base64 0
W05PT1BdW0JBU0U2NF0K (no-eol)

$ hg cat -r . noop-gzip
[NOOP][GZIP]
$ hg debugdata noop-gzip 0
x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)

$ hg cat -r . base64-gzip
[BASE64][GZIP]
$ hg debugdata base64-gzip 0
eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)

$ hg cat -r . base64-gzip-noop
[BASE64][GZIP][NOOP]
$ hg debugdata base64-gzip-noop 0
eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)

# Push to the server
$ hg push
pushing to $TESTTMP/server (glob)
searching for changes
adding changesets
adding manifests
adding file changes
added 7 changesets with 7 changes to 7 files

# Initialize new client (not cloning) and setup extension
$ cd ..
$ hg init client2
$ cd client2
$ cat >> .hg/hgrc << EOF
> [paths]
> default = $TESTTMP/server
> [extensions]
> extension=$TESTDIR/flagprocessorext.py
> EOF

# Pull from server and update to latest revision
$ hg pull default
pulling from $TESTTMP/server (glob)
requesting all changes
adding changesets
adding manifests
adding file changes
added 7 changesets with 7 changes to 7 files
(run 'hg update' to get a working copy)
$ hg update
7 files updated, 0 files merged, 0 files removed, 0 files unresolved

# TEST: ensure the revision data is consistent
$ hg cat noop
[NOOP]
$ hg debugdata noop 0
[NOOP]

$ hg cat -r . base64
[BASE64]
$ hg debugdata base64 0
W0JBU0U2NF0K (no-eol)

$ hg cat -r . gzip
[GZIP]
$ hg debugdata gzip 0
x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)

$ hg cat -r . noop-base64
[NOOP][BASE64]
$ hg debugdata noop-base64 0
W05PT1BdW0JBU0U2NF0K (no-eol)

$ hg cat -r . noop-gzip
[NOOP][GZIP]
$ hg debugdata noop-gzip 0
x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)

$ hg cat -r . base64-gzip
[BASE64][GZIP]
$ hg debugdata base64-gzip 0
eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)

$ hg cat -r . base64-gzip-noop
[BASE64][GZIP][NOOP]
$ hg debugdata base64-gzip-noop 0
eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)

# TEST: ensure a missing processor is handled
$ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
$ hg commit -Aqm 'fail+base64+gzip+noop'
abort: missing processor for flag '0x1'!
[255]

# TEST: ensure we cannot register several flag processors on the same flag
$ cat >> .hg/hgrc << EOF
> [extensions]
> extension=$TESTDIR/flagprocessorext.py
> duplicate=$TESTDIR/flagprocessorext.py
> EOF
$ echo 'this should fail' > file
$ hg commit -Aqm 'add file'
abort: cannot register multiple processors on flag '0x8'.
[255]

$ cd ..

# TEST: bundle repo
$ hg init bundletest
$ cd bundletest

$ cat >> .hg/hgrc << EOF
> [extensions]
> flagprocessor=$TESTDIR/flagprocessorext.py
> EOF

$ for i in 0 single two three 4; do
>   echo '[BASE64]a-bit-longer-'$i > base64
>   hg commit -m base64-$i -A base64
> done

$ hg update 2 -q
$ echo '[BASE64]a-bit-longer-branching' > base64
$ hg commit -q -m branching

$ hg bundle --base 1 bundle.hg
4 changesets found
$ hg --config extensions.strip= strip -r 2 --no-backup --force -q
$ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
-Traceback (most recent call last):
-mercurial.mpatch.mpatchError: invalid patch
+5 branching
+base64 | 2 +-
+1 files changed, 1 insertions(+), 1 deletions(-)
+
+4 base64-4
+base64 | 2 +-
+1 files changed, 1 insertions(+), 1 deletions(-)
+
+3 base64-three
+base64 | 2 +-
+1 files changed, 1 insertions(+), 1 deletions(-)
+
+2 base64-two
+base64 | 2 +-
+1 files changed, 1 insertions(+), 1 deletions(-)
+
+1 base64-single
+base64 | 2 +-
+1 files changed, 1 insertions(+), 1 deletions(-)
+
+0 base64-0
+base64 | 1 +
+1 files changed, 1 insertions(+), 0 deletions(-)
+

$ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
-Traceback (most recent call last):
-mercurial.mpatch.mpatchError: invalid patch
-$ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
-abort: repository bundle-again.hg not found!
+[1]
+$ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
+Traceback (most recent call last):
+TypeError: Incorrect padding