bundlerepo: make baserevision return raw text...
Jun Wu
r31834:433ab46f default
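The four `baserevision` overrides below now pass `raw=True`, so the delta base handed to the patching loop is the stored, unprocessed revlog text rather than the output of a read flag processor. A condensed sketch of the reconstruction path that depends on this, taken from `bundlerevlog.revision` in the hunk below (illustrative only; caching, hash checking and the null-node shortcut are omitted, and `mdiff` is `mercurial.mdiff`):

    def _reconstruct(rlog, rev):
        # walk bundle deltas back to a revision that lives in the on-disk revlog
        chain = []
        iterrev = rev
        while iterrev > rlog.repotiprev:
            chain.append(iterrev)
            iterrev = rlog.index[iterrev][3]      # delta base of this entry
        # the base must be raw text: the bundle deltas were computed against it
        text = rlog.baserevision(iterrev)
        while chain:
            text = mdiff.patches(text, [rlog._chunk(chain.pop())])
        # flag processors run exactly once, on the fully patched raw text
        text, validatehash = rlog._processflags(text, rlog.flags(rev), 'read')
        return text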
@@ -1,556 +1,556
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17 import shutil
17 import shutil
18 import tempfile
18 import tempfile
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import nullid
21 from .node import nullid
22
22
23 from . import (
23 from . import (
24 bundle2,
24 bundle2,
25 changegroup,
25 changegroup,
26 changelog,
26 changelog,
27 cmdutil,
27 cmdutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 filelog,
31 filelog,
32 localrepo,
32 localrepo,
33 manifest,
33 manifest,
34 mdiff,
34 mdiff,
35 node as nodemod,
35 node as nodemod,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 revlog,
39 revlog,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43
43
44 class bundlerevlog(revlog.revlog):
44 class bundlerevlog(revlog.revlog):
45 def __init__(self, opener, indexfile, bundle, linkmapper):
45 def __init__(self, opener, indexfile, bundle, linkmapper):
46 # How it works:
46 # How it works:
47 # To retrieve a revision, we need to know the offset of the revision in
47 # To retrieve a revision, we need to know the offset of the revision in
48 # the bundle (an unbundle object). We store this offset in the index
48 # the bundle (an unbundle object). We store this offset in the index
49 # (start). The base of the delta is stored in the base field.
49 # (start). The base of the delta is stored in the base field.
50 #
50 #
51 # To differentiate a rev in the bundle from a rev in the revlog, we
51 # To differentiate a rev in the bundle from a rev in the revlog, we
52 # check revision against repotiprev.
52 # check revision against repotiprev.
53 opener = vfsmod.readonlyvfs(opener)
53 opener = vfsmod.readonlyvfs(opener)
54 revlog.revlog.__init__(self, opener, indexfile)
54 revlog.revlog.__init__(self, opener, indexfile)
55 self.bundle = bundle
55 self.bundle = bundle
56 n = len(self)
56 n = len(self)
57 self.repotiprev = n - 1
57 self.repotiprev = n - 1
58 chain = None
58 chain = None
59 self.bundlerevs = set() # used by 'bundle()' revset expression
59 self.bundlerevs = set() # used by 'bundle()' revset expression
60 getchunk = lambda: bundle.deltachunk(chain)
60 getchunk = lambda: bundle.deltachunk(chain)
61 for chunkdata in iter(getchunk, {}):
61 for chunkdata in iter(getchunk, {}):
62 node = chunkdata['node']
62 node = chunkdata['node']
63 p1 = chunkdata['p1']
63 p1 = chunkdata['p1']
64 p2 = chunkdata['p2']
64 p2 = chunkdata['p2']
65 cs = chunkdata['cs']
65 cs = chunkdata['cs']
66 deltabase = chunkdata['deltabase']
66 deltabase = chunkdata['deltabase']
67 delta = chunkdata['delta']
67 delta = chunkdata['delta']
68
68
69 size = len(delta)
69 size = len(delta)
70 start = bundle.tell() - size
70 start = bundle.tell() - size
71
71
72 link = linkmapper(cs)
72 link = linkmapper(cs)
73 if node in self.nodemap:
73 if node in self.nodemap:
74 # this can happen if two branches make the same change
74 # this can happen if two branches make the same change
75 chain = node
75 chain = node
76 self.bundlerevs.add(self.nodemap[node])
76 self.bundlerevs.add(self.nodemap[node])
77 continue
77 continue
78
78
79 for p in (p1, p2):
79 for p in (p1, p2):
80 if p not in self.nodemap:
80 if p not in self.nodemap:
81 raise error.LookupError(p, self.indexfile,
81 raise error.LookupError(p, self.indexfile,
82 _("unknown parent"))
82 _("unknown parent"))
83
83
84 if deltabase not in self.nodemap:
84 if deltabase not in self.nodemap:
85 raise LookupError(deltabase, self.indexfile,
85 raise LookupError(deltabase, self.indexfile,
86 _('unknown delta base'))
86 _('unknown delta base'))
87
87
88 baserev = self.rev(deltabase)
88 baserev = self.rev(deltabase)
89 # start, size, full unc. size, base (unused), link, p1, p2, node
89 # start, size, full unc. size, base (unused), link, p1, p2, node
90 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
90 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
91 self.rev(p1), self.rev(p2), node)
91 self.rev(p1), self.rev(p2), node)
92 self.index.insert(-1, e)
92 self.index.insert(-1, e)
93 self.nodemap[node] = n
93 self.nodemap[node] = n
94 self.bundlerevs.add(n)
94 self.bundlerevs.add(n)
95 chain = node
95 chain = node
96 n += 1
96 n += 1
97
97
98 def _chunk(self, rev):
98 def _chunk(self, rev):
99 # Warning: in case of bundle, the diff is against what we stored as
99 # Warning: in case of bundle, the diff is against what we stored as
100 # delta base, not against rev - 1
100 # delta base, not against rev - 1
101 # XXX: could use some caching
101 # XXX: could use some caching
102 if rev <= self.repotiprev:
102 if rev <= self.repotiprev:
103 return revlog.revlog._chunk(self, rev)
103 return revlog.revlog._chunk(self, rev)
104 self.bundle.seek(self.start(rev))
104 self.bundle.seek(self.start(rev))
105 return self.bundle.read(self.length(rev))
105 return self.bundle.read(self.length(rev))
106
106
107 def revdiff(self, rev1, rev2):
107 def revdiff(self, rev1, rev2):
108 """return or calculate a delta between two revisions"""
108 """return or calculate a delta between two revisions"""
109 if rev1 > self.repotiprev and rev2 > self.repotiprev:
109 if rev1 > self.repotiprev and rev2 > self.repotiprev:
110 # hot path for bundle
110 # hot path for bundle
111 revb = self.index[rev2][3]
111 revb = self.index[rev2][3]
112 if revb == rev1:
112 if revb == rev1:
113 return self._chunk(rev2)
113 return self._chunk(rev2)
114 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
114 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
115 return revlog.revlog.revdiff(self, rev1, rev2)
115 return revlog.revlog.revdiff(self, rev1, rev2)
116
116
117 return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
117 return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
118
118
119 def revision(self, nodeorrev, raw=False):
119 def revision(self, nodeorrev, raw=False):
120 """return an uncompressed revision of a given node or revision
120 """return an uncompressed revision of a given node or revision
121 number.
121 number.
122 """
122 """
123 if isinstance(nodeorrev, int):
123 if isinstance(nodeorrev, int):
124 rev = nodeorrev
124 rev = nodeorrev
125 node = self.node(rev)
125 node = self.node(rev)
126 else:
126 else:
127 node = nodeorrev
127 node = nodeorrev
128 rev = self.rev(node)
128 rev = self.rev(node)
129
129
130 if node == nullid:
130 if node == nullid:
131 return ""
131 return ""
132
132
133 text = None
133 text = None
134 chain = []
134 chain = []
135 iterrev = rev
135 iterrev = rev
136 # reconstruct the revision if it is from a changegroup
136 # reconstruct the revision if it is from a changegroup
137 while iterrev > self.repotiprev:
137 while iterrev > self.repotiprev:
138 if self._cache and self._cache[1] == iterrev:
138 if self._cache and self._cache[1] == iterrev:
139 text = self._cache[2]
139 text = self._cache[2]
140 break
140 break
141 chain.append(iterrev)
141 chain.append(iterrev)
142 iterrev = self.index[iterrev][3]
142 iterrev = self.index[iterrev][3]
143 if text is None:
143 if text is None:
144 text = self.baserevision(iterrev)
144 text = self.baserevision(iterrev)
145
145
146 while chain:
146 while chain:
147 delta = self._chunk(chain.pop())
147 delta = self._chunk(chain.pop())
148 text = mdiff.patches(text, [delta])
148 text = mdiff.patches(text, [delta])
149
149
150 text, validatehash = self._processflags(text, self.flags(rev),
150 text, validatehash = self._processflags(text, self.flags(rev),
151 'read', raw=raw)
151 'read', raw=raw)
152 if validatehash:
152 if validatehash:
153 self.checkhash(text, node, rev=rev)
153 self.checkhash(text, node, rev=rev)
154 self._cache = (node, rev, text)
154 self._cache = (node, rev, text)
155 return text
155 return text
156
156
157 def baserevision(self, nodeorrev):
157 def baserevision(self, nodeorrev):
158 # Revlog subclasses may override 'revision' method to modify format of
158 # Revlog subclasses may override 'revision' method to modify format of
159 # content retrieved from revlog. To use bundlerevlog with such class one
159 # content retrieved from revlog. To use bundlerevlog with such class one
160 # needs to override 'baserevision' and make more specific call here.
160 # needs to override 'baserevision' and make more specific call here.
161 return revlog.revlog.revision(self, nodeorrev)
161 return revlog.revlog.revision(self, nodeorrev, raw=True)
162
162
163 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
163 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
164 raise NotImplementedError
164 raise NotImplementedError
165 def addgroup(self, revs, linkmapper, transaction):
165 def addgroup(self, revs, linkmapper, transaction):
166 raise NotImplementedError
166 raise NotImplementedError
167 def strip(self, rev, minlink):
167 def strip(self, rev, minlink):
168 raise NotImplementedError
168 raise NotImplementedError
169 def checksize(self):
169 def checksize(self):
170 raise NotImplementedError
170 raise NotImplementedError
171
171
172 class bundlechangelog(bundlerevlog, changelog.changelog):
172 class bundlechangelog(bundlerevlog, changelog.changelog):
173 def __init__(self, opener, bundle):
173 def __init__(self, opener, bundle):
174 changelog.changelog.__init__(self, opener)
174 changelog.changelog.__init__(self, opener)
175 linkmapper = lambda x: x
175 linkmapper = lambda x: x
176 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
176 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
177 linkmapper)
177 linkmapper)
178
178
179 def baserevision(self, nodeorrev):
179 def baserevision(self, nodeorrev):
180 # Although changelog doesn't override 'revision' method, some extensions
180 # Although changelog doesn't override 'revision' method, some extensions
181 # may replace this class with another that does. Same story with
181 # may replace this class with another that does. Same story with
182 # manifest and filelog classes.
182 # manifest and filelog classes.
183
183
184 # This bypasses filtering on changelog.node() and rev() because we need
184 # This bypasses filtering on changelog.node() and rev() because we need
185 # revision text of the bundle base even if it is hidden.
185 # revision text of the bundle base even if it is hidden.
186 oldfilter = self.filteredrevs
186 oldfilter = self.filteredrevs
187 try:
187 try:
188 self.filteredrevs = ()
188 self.filteredrevs = ()
189 return changelog.changelog.revision(self, nodeorrev)
189 return changelog.changelog.revision(self, nodeorrev, raw=True)
190 finally:
190 finally:
191 self.filteredrevs = oldfilter
191 self.filteredrevs = oldfilter
192
192
193 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
193 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
194 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
194 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
195 manifest.manifestrevlog.__init__(self, opener, dir=dir)
195 manifest.manifestrevlog.__init__(self, opener, dir=dir)
196 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
196 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
197 linkmapper)
197 linkmapper)
198 if dirlogstarts is None:
198 if dirlogstarts is None:
199 dirlogstarts = {}
199 dirlogstarts = {}
200 if self.bundle.version == "03":
200 if self.bundle.version == "03":
201 dirlogstarts = _getfilestarts(self.bundle)
201 dirlogstarts = _getfilestarts(self.bundle)
202 self._dirlogstarts = dirlogstarts
202 self._dirlogstarts = dirlogstarts
203 self._linkmapper = linkmapper
203 self._linkmapper = linkmapper
204
204
205 def baserevision(self, nodeorrev):
205 def baserevision(self, nodeorrev):
206 node = nodeorrev
206 node = nodeorrev
207 if isinstance(node, int):
207 if isinstance(node, int):
208 node = self.node(node)
208 node = self.node(node)
209
209
210 if node in self.fulltextcache:
210 if node in self.fulltextcache:
211 result = '%s' % self.fulltextcache[node]
211 result = '%s' % self.fulltextcache[node]
212 else:
212 else:
213 result = manifest.manifestrevlog.revision(self, nodeorrev)
213 result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
214 return result
214 return result
215
215
216 def dirlog(self, d):
216 def dirlog(self, d):
217 if d in self._dirlogstarts:
217 if d in self._dirlogstarts:
218 self.bundle.seek(self._dirlogstarts[d])
218 self.bundle.seek(self._dirlogstarts[d])
219 return bundlemanifest(
219 return bundlemanifest(
220 self.opener, self.bundle, self._linkmapper,
220 self.opener, self.bundle, self._linkmapper,
221 self._dirlogstarts, dir=d)
221 self._dirlogstarts, dir=d)
222 return super(bundlemanifest, self).dirlog(d)
222 return super(bundlemanifest, self).dirlog(d)
223
223
224 class bundlefilelog(bundlerevlog, filelog.filelog):
224 class bundlefilelog(bundlerevlog, filelog.filelog):
225 def __init__(self, opener, path, bundle, linkmapper):
225 def __init__(self, opener, path, bundle, linkmapper):
226 filelog.filelog.__init__(self, opener, path)
226 filelog.filelog.__init__(self, opener, path)
227 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
227 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
228 linkmapper)
228 linkmapper)
229
229
230 def baserevision(self, nodeorrev):
230 def baserevision(self, nodeorrev):
231 return filelog.filelog.revision(self, nodeorrev)
231 return filelog.filelog.revision(self, nodeorrev, raw=True)
232
232
233 class bundlepeer(localrepo.localpeer):
233 class bundlepeer(localrepo.localpeer):
234 def canpush(self):
234 def canpush(self):
235 return False
235 return False
236
236
237 class bundlephasecache(phases.phasecache):
237 class bundlephasecache(phases.phasecache):
238 def __init__(self, *args, **kwargs):
238 def __init__(self, *args, **kwargs):
239 super(bundlephasecache, self).__init__(*args, **kwargs)
239 super(bundlephasecache, self).__init__(*args, **kwargs)
240 if util.safehasattr(self, 'opener'):
240 if util.safehasattr(self, 'opener'):
241 self.opener = vfsmod.readonlyvfs(self.opener)
241 self.opener = vfsmod.readonlyvfs(self.opener)
242
242
243 def write(self):
243 def write(self):
244 raise NotImplementedError
244 raise NotImplementedError
245
245
246 def _write(self, fp):
246 def _write(self, fp):
247 raise NotImplementedError
247 raise NotImplementedError
248
248
249 def _updateroots(self, phase, newroots, tr):
249 def _updateroots(self, phase, newroots, tr):
250 self.phaseroots[phase] = newroots
250 self.phaseroots[phase] = newroots
251 self.invalidate()
251 self.invalidate()
252 self.dirty = True
252 self.dirty = True
253
253
254 def _getfilestarts(bundle):
254 def _getfilestarts(bundle):
255 bundlefilespos = {}
255 bundlefilespos = {}
256 for chunkdata in iter(bundle.filelogheader, {}):
256 for chunkdata in iter(bundle.filelogheader, {}):
257 fname = chunkdata['filename']
257 fname = chunkdata['filename']
258 bundlefilespos[fname] = bundle.tell()
258 bundlefilespos[fname] = bundle.tell()
259 for chunk in iter(lambda: bundle.deltachunk(None), {}):
259 for chunk in iter(lambda: bundle.deltachunk(None), {}):
260 pass
260 pass
261 return bundlefilespos
261 return bundlefilespos
262
262
263 class bundlerepository(localrepo.localrepository):
263 class bundlerepository(localrepo.localrepository):
264 def __init__(self, ui, path, bundlename):
264 def __init__(self, ui, path, bundlename):
265 def _writetempbundle(read, suffix, header=''):
265 def _writetempbundle(read, suffix, header=''):
266 """Write a temporary file to disk
266 """Write a temporary file to disk
267
267
268 This is closure because we need to make sure this tracked by
268 This is closure because we need to make sure this tracked by
269 self.tempfile for cleanup purposes."""
269 self.tempfile for cleanup purposes."""
270 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
270 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
271 suffix=".hg10un")
271 suffix=".hg10un")
272 self.tempfile = temp
272 self.tempfile = temp
273
273
274 with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
274 with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
275 fptemp.write(header)
275 fptemp.write(header)
276 while True:
276 while True:
277 chunk = read(2**18)
277 chunk = read(2**18)
278 if not chunk:
278 if not chunk:
279 break
279 break
280 fptemp.write(chunk)
280 fptemp.write(chunk)
281
281
282 return self.vfs.open(self.tempfile, mode="rb")
282 return self.vfs.open(self.tempfile, mode="rb")
283 self._tempparent = None
283 self._tempparent = None
284 try:
284 try:
285 localrepo.localrepository.__init__(self, ui, path)
285 localrepo.localrepository.__init__(self, ui, path)
286 except error.RepoError:
286 except error.RepoError:
287 self._tempparent = tempfile.mkdtemp()
287 self._tempparent = tempfile.mkdtemp()
288 localrepo.instance(ui, self._tempparent, 1)
288 localrepo.instance(ui, self._tempparent, 1)
289 localrepo.localrepository.__init__(self, ui, self._tempparent)
289 localrepo.localrepository.__init__(self, ui, self._tempparent)
290 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
290 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
291
291
292 if path:
292 if path:
293 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
293 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
294 else:
294 else:
295 self._url = 'bundle:' + bundlename
295 self._url = 'bundle:' + bundlename
296
296
297 self.tempfile = None
297 self.tempfile = None
298 f = util.posixfile(bundlename, "rb")
298 f = util.posixfile(bundlename, "rb")
299 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
299 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
300
300
301 if isinstance(self.bundle, bundle2.unbundle20):
301 if isinstance(self.bundle, bundle2.unbundle20):
302 cgstream = None
302 cgstream = None
303 for part in self.bundle.iterparts():
303 for part in self.bundle.iterparts():
304 if part.type == 'changegroup':
304 if part.type == 'changegroup':
305 if cgstream is not None:
305 if cgstream is not None:
306 raise NotImplementedError("can't process "
306 raise NotImplementedError("can't process "
307 "multiple changegroups")
307 "multiple changegroups")
308 cgstream = part
308 cgstream = part
309 version = part.params.get('version', '01')
309 version = part.params.get('version', '01')
310 legalcgvers = changegroup.supportedincomingversions(self)
310 legalcgvers = changegroup.supportedincomingversions(self)
311 if version not in legalcgvers:
311 if version not in legalcgvers:
312 msg = _('Unsupported changegroup version: %s')
312 msg = _('Unsupported changegroup version: %s')
313 raise error.Abort(msg % version)
313 raise error.Abort(msg % version)
314 if self.bundle.compressed():
314 if self.bundle.compressed():
315 cgstream = _writetempbundle(part.read,
315 cgstream = _writetempbundle(part.read,
316 ".cg%sun" % version)
316 ".cg%sun" % version)
317
317
318 if cgstream is None:
318 if cgstream is None:
319 raise error.Abort(_('No changegroups found'))
319 raise error.Abort(_('No changegroups found'))
320 cgstream.seek(0)
320 cgstream.seek(0)
321
321
322 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
322 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
323
323
324 elif self.bundle.compressed():
324 elif self.bundle.compressed():
325 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
325 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
326 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
326 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
327 bundlename,
327 bundlename,
328 self.vfs)
328 self.vfs)
329
329
330 # dict with the mapping 'filename' -> position in the bundle
330 # dict with the mapping 'filename' -> position in the bundle
331 self.bundlefilespos = {}
331 self.bundlefilespos = {}
332
332
333 self.firstnewrev = self.changelog.repotiprev + 1
333 self.firstnewrev = self.changelog.repotiprev + 1
334 phases.retractboundary(self, None, phases.draft,
334 phases.retractboundary(self, None, phases.draft,
335 [ctx.node() for ctx in self[self.firstnewrev:]])
335 [ctx.node() for ctx in self[self.firstnewrev:]])
336
336
337 @localrepo.unfilteredpropertycache
337 @localrepo.unfilteredpropertycache
338 def _phasecache(self):
338 def _phasecache(self):
339 return bundlephasecache(self, self._phasedefaults)
339 return bundlephasecache(self, self._phasedefaults)
340
340
341 @localrepo.unfilteredpropertycache
341 @localrepo.unfilteredpropertycache
342 def changelog(self):
342 def changelog(self):
343 # consume the header if it exists
343 # consume the header if it exists
344 self.bundle.changelogheader()
344 self.bundle.changelogheader()
345 c = bundlechangelog(self.svfs, self.bundle)
345 c = bundlechangelog(self.svfs, self.bundle)
346 self.manstart = self.bundle.tell()
346 self.manstart = self.bundle.tell()
347 return c
347 return c
348
348
349 def _constructmanifest(self):
349 def _constructmanifest(self):
350 self.bundle.seek(self.manstart)
350 self.bundle.seek(self.manstart)
351 # consume the header if it exists
351 # consume the header if it exists
352 self.bundle.manifestheader()
352 self.bundle.manifestheader()
353 linkmapper = self.unfiltered().changelog.rev
353 linkmapper = self.unfiltered().changelog.rev
354 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
354 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
355 self.filestart = self.bundle.tell()
355 self.filestart = self.bundle.tell()
356 return m
356 return m
357
357
358 @localrepo.unfilteredpropertycache
358 @localrepo.unfilteredpropertycache
359 def manstart(self):
359 def manstart(self):
360 self.changelog
360 self.changelog
361 return self.manstart
361 return self.manstart
362
362
363 @localrepo.unfilteredpropertycache
363 @localrepo.unfilteredpropertycache
364 def filestart(self):
364 def filestart(self):
365 self.manifestlog
365 self.manifestlog
366 return self.filestart
366 return self.filestart
367
367
368 def url(self):
368 def url(self):
369 return self._url
369 return self._url
370
370
371 def file(self, f):
371 def file(self, f):
372 if not self.bundlefilespos:
372 if not self.bundlefilespos:
373 self.bundle.seek(self.filestart)
373 self.bundle.seek(self.filestart)
374 self.bundlefilespos = _getfilestarts(self.bundle)
374 self.bundlefilespos = _getfilestarts(self.bundle)
375
375
376 if f in self.bundlefilespos:
376 if f in self.bundlefilespos:
377 self.bundle.seek(self.bundlefilespos[f])
377 self.bundle.seek(self.bundlefilespos[f])
378 linkmapper = self.unfiltered().changelog.rev
378 linkmapper = self.unfiltered().changelog.rev
379 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
379 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
380 else:
380 else:
381 return filelog.filelog(self.svfs, f)
381 return filelog.filelog(self.svfs, f)
382
382
383 def close(self):
383 def close(self):
384 """Close assigned bundle file immediately."""
384 """Close assigned bundle file immediately."""
385 self.bundlefile.close()
385 self.bundlefile.close()
386 if self.tempfile is not None:
386 if self.tempfile is not None:
387 self.vfs.unlink(self.tempfile)
387 self.vfs.unlink(self.tempfile)
388 if self._tempparent:
388 if self._tempparent:
389 shutil.rmtree(self._tempparent, True)
389 shutil.rmtree(self._tempparent, True)
390
390
391 def cancopy(self):
391 def cancopy(self):
392 return False
392 return False
393
393
394 def peer(self):
394 def peer(self):
395 return bundlepeer(self)
395 return bundlepeer(self)
396
396
397 def getcwd(self):
397 def getcwd(self):
398 return pycompat.getcwd() # always outside the repo
398 return pycompat.getcwd() # always outside the repo
399
399
400 # Check if parents exist in localrepo before setting
400 # Check if parents exist in localrepo before setting
401 def setparents(self, p1, p2=nullid):
401 def setparents(self, p1, p2=nullid):
402 p1rev = self.changelog.rev(p1)
402 p1rev = self.changelog.rev(p1)
403 p2rev = self.changelog.rev(p2)
403 p2rev = self.changelog.rev(p2)
404 msg = _("setting parent to node %s that only exists in the bundle\n")
404 msg = _("setting parent to node %s that only exists in the bundle\n")
405 if self.changelog.repotiprev < p1rev:
405 if self.changelog.repotiprev < p1rev:
406 self.ui.warn(msg % nodemod.hex(p1))
406 self.ui.warn(msg % nodemod.hex(p1))
407 if self.changelog.repotiprev < p2rev:
407 if self.changelog.repotiprev < p2rev:
408 self.ui.warn(msg % nodemod.hex(p2))
408 self.ui.warn(msg % nodemod.hex(p2))
409 return super(bundlerepository, self).setparents(p1, p2)
409 return super(bundlerepository, self).setparents(p1, p2)
410
410
411 def instance(ui, path, create):
411 def instance(ui, path, create):
412 if create:
412 if create:
413 raise error.Abort(_('cannot create new bundle repository'))
413 raise error.Abort(_('cannot create new bundle repository'))
414 # internal config: bundle.mainreporoot
414 # internal config: bundle.mainreporoot
415 parentpath = ui.config("bundle", "mainreporoot", "")
415 parentpath = ui.config("bundle", "mainreporoot", "")
416 if not parentpath:
416 if not parentpath:
417 # try to find the correct path to the working directory repo
417 # try to find the correct path to the working directory repo
418 parentpath = cmdutil.findrepo(pycompat.getcwd())
418 parentpath = cmdutil.findrepo(pycompat.getcwd())
419 if parentpath is None:
419 if parentpath is None:
420 parentpath = ''
420 parentpath = ''
421 if parentpath:
421 if parentpath:
422 # Try to make the full path relative so we get a nice, short URL.
422 # Try to make the full path relative so we get a nice, short URL.
423 # In particular, we don't want temp dir names in test outputs.
423 # In particular, we don't want temp dir names in test outputs.
424 cwd = pycompat.getcwd()
424 cwd = pycompat.getcwd()
425 if parentpath == cwd:
425 if parentpath == cwd:
426 parentpath = ''
426 parentpath = ''
427 else:
427 else:
428 cwd = pathutil.normasprefix(cwd)
428 cwd = pathutil.normasprefix(cwd)
429 if parentpath.startswith(cwd):
429 if parentpath.startswith(cwd):
430 parentpath = parentpath[len(cwd):]
430 parentpath = parentpath[len(cwd):]
431 u = util.url(path)
431 u = util.url(path)
432 path = u.localpath()
432 path = u.localpath()
433 if u.scheme == 'bundle':
433 if u.scheme == 'bundle':
434 s = path.split("+", 1)
434 s = path.split("+", 1)
435 if len(s) == 1:
435 if len(s) == 1:
436 repopath, bundlename = parentpath, s[0]
436 repopath, bundlename = parentpath, s[0]
437 else:
437 else:
438 repopath, bundlename = s
438 repopath, bundlename = s
439 else:
439 else:
440 repopath, bundlename = parentpath, path
440 repopath, bundlename = parentpath, path
441 return bundlerepository(ui, repopath, bundlename)
441 return bundlerepository(ui, repopath, bundlename)
442
442
443 class bundletransactionmanager(object):
443 class bundletransactionmanager(object):
444 def transaction(self):
444 def transaction(self):
445 return None
445 return None
446
446
447 def close(self):
447 def close(self):
448 raise NotImplementedError
448 raise NotImplementedError
449
449
450 def release(self):
450 def release(self):
451 raise NotImplementedError
451 raise NotImplementedError
452
452
453 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
453 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
454 force=False):
454 force=False):
455 '''obtains a bundle of changes incoming from other
455 '''obtains a bundle of changes incoming from other
456
456
457 "onlyheads" restricts the returned changes to those reachable from the
457 "onlyheads" restricts the returned changes to those reachable from the
458 specified heads.
458 specified heads.
459 "bundlename", if given, stores the bundle to this file path permanently;
459 "bundlename", if given, stores the bundle to this file path permanently;
460 otherwise it's stored to a temp file and gets deleted again when you call
460 otherwise it's stored to a temp file and gets deleted again when you call
461 the returned "cleanupfn".
461 the returned "cleanupfn".
462 "force" indicates whether to proceed on unrelated repos.
462 "force" indicates whether to proceed on unrelated repos.
463
463
464 Returns a tuple (local, csets, cleanupfn):
464 Returns a tuple (local, csets, cleanupfn):
465
465
466 "local" is a local repo from which to obtain the actual incoming
466 "local" is a local repo from which to obtain the actual incoming
467 changesets; it is a bundlerepo for the obtained bundle when the
467 changesets; it is a bundlerepo for the obtained bundle when the
468 original "other" is remote.
468 original "other" is remote.
469 "csets" lists the incoming changeset node ids.
469 "csets" lists the incoming changeset node ids.
470 "cleanupfn" must be called without arguments when you're done processing
470 "cleanupfn" must be called without arguments when you're done processing
471 the changes; it closes both the original "other" and the one returned
471 the changes; it closes both the original "other" and the one returned
472 here.
472 here.
473 '''
473 '''
474 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
474 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
475 force=force)
475 force=force)
476 common, incoming, rheads = tmp
476 common, incoming, rheads = tmp
477 if not incoming:
477 if not incoming:
478 try:
478 try:
479 if bundlename:
479 if bundlename:
480 os.unlink(bundlename)
480 os.unlink(bundlename)
481 except OSError:
481 except OSError:
482 pass
482 pass
483 return repo, [], other.close
483 return repo, [], other.close
484
484
485 commonset = set(common)
485 commonset = set(common)
486 rheads = [x for x in rheads if x not in commonset]
486 rheads = [x for x in rheads if x not in commonset]
487
487
488 bundle = None
488 bundle = None
489 bundlerepo = None
489 bundlerepo = None
490 localrepo = other.local()
490 localrepo = other.local()
491 if bundlename or not localrepo:
491 if bundlename or not localrepo:
492 # create a bundle (uncompressed if other repo is not local)
492 # create a bundle (uncompressed if other repo is not local)
493
493
494 # developer config: devel.legacy.exchange
494 # developer config: devel.legacy.exchange
495 legexc = ui.configlist('devel', 'legacy.exchange')
495 legexc = ui.configlist('devel', 'legacy.exchange')
496 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
496 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
497 canbundle2 = (not forcebundle1
497 canbundle2 = (not forcebundle1
498 and other.capable('getbundle')
498 and other.capable('getbundle')
499 and other.capable('bundle2'))
499 and other.capable('bundle2'))
500 if canbundle2:
500 if canbundle2:
501 kwargs = {}
501 kwargs = {}
502 kwargs['common'] = common
502 kwargs['common'] = common
503 kwargs['heads'] = rheads
503 kwargs['heads'] = rheads
504 kwargs['bundlecaps'] = exchange.caps20to10(repo)
504 kwargs['bundlecaps'] = exchange.caps20to10(repo)
505 kwargs['cg'] = True
505 kwargs['cg'] = True
506 b2 = other.getbundle('incoming', **kwargs)
506 b2 = other.getbundle('incoming', **kwargs)
507 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
507 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
508 bundlename)
508 bundlename)
509 else:
509 else:
510 if other.capable('getbundle'):
510 if other.capable('getbundle'):
511 cg = other.getbundle('incoming', common=common, heads=rheads)
511 cg = other.getbundle('incoming', common=common, heads=rheads)
512 elif onlyheads is None and not other.capable('changegroupsubset'):
512 elif onlyheads is None and not other.capable('changegroupsubset'):
513 # compat with older servers when pulling all remote heads
513 # compat with older servers when pulling all remote heads
514 cg = other.changegroup(incoming, "incoming")
514 cg = other.changegroup(incoming, "incoming")
515 rheads = None
515 rheads = None
516 else:
516 else:
517 cg = other.changegroupsubset(incoming, rheads, 'incoming')
517 cg = other.changegroupsubset(incoming, rheads, 'incoming')
518 if localrepo:
518 if localrepo:
519 bundletype = "HG10BZ"
519 bundletype = "HG10BZ"
520 else:
520 else:
521 bundletype = "HG10UN"
521 bundletype = "HG10UN"
522 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
522 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
523 bundletype)
523 bundletype)
524 # keep written bundle?
524 # keep written bundle?
525 if bundlename:
525 if bundlename:
526 bundle = None
526 bundle = None
527 if not localrepo:
527 if not localrepo:
528 # use the created uncompressed bundlerepo
528 # use the created uncompressed bundlerepo
529 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
529 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
530 fname)
530 fname)
531 # this repo contains local and other now, so filter out local again
531 # this repo contains local and other now, so filter out local again
532 common = repo.heads()
532 common = repo.heads()
533 if localrepo:
533 if localrepo:
534 # Part of common may be remotely filtered
534 # Part of common may be remotely filtered
535 # So use an unfiltered version
535 # So use an unfiltered version
536 # The discovery process probably need cleanup to avoid that
536 # The discovery process probably need cleanup to avoid that
537 localrepo = localrepo.unfiltered()
537 localrepo = localrepo.unfiltered()
538
538
539 csets = localrepo.changelog.findmissing(common, rheads)
539 csets = localrepo.changelog.findmissing(common, rheads)
540
540
541 if bundlerepo:
541 if bundlerepo:
542 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
542 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
543 remotephases = other.listkeys('phases')
543 remotephases = other.listkeys('phases')
544
544
545 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
545 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
546 pullop.trmanager = bundletransactionmanager()
546 pullop.trmanager = bundletransactionmanager()
547 exchange._pullapplyphases(pullop, remotephases)
547 exchange._pullapplyphases(pullop, remotephases)
548
548
549 def cleanup():
549 def cleanup():
550 if bundlerepo:
550 if bundlerepo:
551 bundlerepo.close()
551 bundlerepo.close()
552 if bundle:
552 if bundle:
553 os.unlink(bundle)
553 os.unlink(bundle)
554 other.close()
554 other.close()
555
555
556 return (localrepo, csets, cleanup)
556 return (localrepo, csets, cleanup)
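The second hunk updates a flag-processor test that drives these code paths through a bundle repository; its expected output records how the errors change once `baserevision` returns raw text. The underlying issue is a delta/base mismatch: bundle deltas are computed against stored (raw) text, so applying them to flag-processed text makes `mpatch` reject the patch. A standalone illustration using the same `mdiff` helpers this module imports (the second revision value is made up for the example):

    from mercurial import mdiff

    # two stored-form ("raw") revisions, e.g. base64-encoded content as the
    # test's flag processor keeps it in the revlog
    raw_v1 = b'W0JBU0U2NF0K'
    raw_v2 = b'W0JBU0U2NF1iaXRzCg=='   # illustrative second revision
    delta = mdiff.textdiff(raw_v1, raw_v2)

    # against the raw base the delta applies cleanly
    assert mdiff.patches(raw_v1, [delta]) == raw_v2

    # the old baserevision handed back the processed (decoded) text instead,
    # so the same delta was applied to a different string; mpatch then
    # typically fails with "invalid patch", as the old expected output records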
@@ -1,198 +1,197
1 # Create server
1 # Create server
2 $ hg init server
2 $ hg init server
3 $ cd server
3 $ cd server
4 $ cat >> .hg/hgrc << EOF
4 $ cat >> .hg/hgrc << EOF
5 > [extensions]
5 > [extensions]
6 > extension=$TESTDIR/flagprocessorext.py
6 > extension=$TESTDIR/flagprocessorext.py
7 > EOF
7 > EOF
8 $ cd ../
8 $ cd ../
9
9
10 # Clone server and enable extensions
10 # Clone server and enable extensions
11 $ hg clone -q server client
11 $ hg clone -q server client
12 $ cd client
12 $ cd client
13 $ cat >> .hg/hgrc << EOF
13 $ cat >> .hg/hgrc << EOF
14 > [extensions]
14 > [extensions]
15 > extension=$TESTDIR/flagprocessorext.py
15 > extension=$TESTDIR/flagprocessorext.py
16 > EOF
16 > EOF
17
17
18 # Commit file that will trigger the noop extension
18 # Commit file that will trigger the noop extension
19 $ echo '[NOOP]' > noop
19 $ echo '[NOOP]' > noop
20 $ hg commit -Aqm "noop"
20 $ hg commit -Aqm "noop"
21
21
22 # Commit file that will trigger the base64 extension
22 # Commit file that will trigger the base64 extension
23 $ echo '[BASE64]' > base64
23 $ echo '[BASE64]' > base64
24 $ hg commit -Aqm 'base64'
24 $ hg commit -Aqm 'base64'
25
25
26 # Commit file that will trigger the gzip extension
26 # Commit file that will trigger the gzip extension
27 $ echo '[GZIP]' > gzip
27 $ echo '[GZIP]' > gzip
28 $ hg commit -Aqm 'gzip'
28 $ hg commit -Aqm 'gzip'
29
29
30 # Commit file that will trigger noop and base64
30 # Commit file that will trigger noop and base64
31 $ echo '[NOOP][BASE64]' > noop-base64
31 $ echo '[NOOP][BASE64]' > noop-base64
32 $ hg commit -Aqm 'noop+base64'
32 $ hg commit -Aqm 'noop+base64'
33
33
34 # Commit file that will trigger noop and gzip
34 # Commit file that will trigger noop and gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
36 $ hg commit -Aqm 'noop+gzip'
36 $ hg commit -Aqm 'noop+gzip'
37
37
38 # Commit file that will trigger base64 and gzip
38 # Commit file that will trigger base64 and gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
40 $ hg commit -Aqm 'base64+gzip'
40 $ hg commit -Aqm 'base64+gzip'
41
41
42 # Commit file that will trigger base64, gzip and noop
42 # Commit file that will trigger base64, gzip and noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 $ hg commit -Aqm 'base64+gzip+noop'
44 $ hg commit -Aqm 'base64+gzip+noop'
45
45
46 # TEST: ensure the revision data is consistent
46 # TEST: ensure the revision data is consistent
47 $ hg cat noop
47 $ hg cat noop
48 [NOOP]
48 [NOOP]
49 $ hg debugdata noop 0
49 $ hg debugdata noop 0
50 [NOOP]
50 [NOOP]
51
51
52 $ hg cat -r . base64
52 $ hg cat -r . base64
53 [BASE64]
53 [BASE64]
54 $ hg debugdata base64 0
54 $ hg debugdata base64 0
55 W0JBU0U2NF0K (no-eol)
55 W0JBU0U2NF0K (no-eol)
56
56
57 $ hg cat -r . gzip
57 $ hg cat -r . gzip
58 [GZIP]
58 [GZIP]
59 $ hg debugdata gzip 0
59 $ hg debugdata gzip 0
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61
61
62 $ hg cat -r . noop-base64
62 $ hg cat -r . noop-base64
63 [NOOP][BASE64]
63 [NOOP][BASE64]
64 $ hg debugdata noop-base64 0
64 $ hg debugdata noop-base64 0
65 W05PT1BdW0JBU0U2NF0K (no-eol)
65 W05PT1BdW0JBU0U2NF0K (no-eol)
66
66
67 $ hg cat -r . noop-gzip
67 $ hg cat -r . noop-gzip
68 [NOOP][GZIP]
68 [NOOP][GZIP]
69 $ hg debugdata noop-gzip 0
69 $ hg debugdata noop-gzip 0
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71
71
72 $ hg cat -r . base64-gzip
72 $ hg cat -r . base64-gzip
73 [BASE64][GZIP]
73 [BASE64][GZIP]
74 $ hg debugdata base64-gzip 0
74 $ hg debugdata base64-gzip 0
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76
76
77 $ hg cat -r . base64-gzip-noop
77 $ hg cat -r . base64-gzip-noop
78 [BASE64][GZIP][NOOP]
78 [BASE64][GZIP][NOOP]
79 $ hg debugdata base64-gzip-noop 0
79 $ hg debugdata base64-gzip-noop 0
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81
81
82 # Push to the server
82 # Push to the server
83 $ hg push
83 $ hg push
84 pushing to $TESTTMP/server (glob)
84 pushing to $TESTTMP/server (glob)
85 searching for changes
85 searching for changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 7 changesets with 7 changes to 7 files
89 added 7 changesets with 7 changes to 7 files
90
90
91 # Initialize new client (not cloning) and setup extension
91 # Initialize new client (not cloning) and setup extension
92 $ cd ..
92 $ cd ..
93 $ hg init client2
93 $ hg init client2
94 $ cd client2
94 $ cd client2
95 $ cat >> .hg/hgrc << EOF
95 $ cat >> .hg/hgrc << EOF
96 > [paths]
96 > [paths]
97 > default = $TESTTMP/server
97 > default = $TESTTMP/server
98 > [extensions]
98 > [extensions]
99 > extension=$TESTDIR/flagprocessorext.py
99 > extension=$TESTDIR/flagprocessorext.py
100 > EOF
100 > EOF
101
101
102 # Pull from server and update to latest revision
102 # Pull from server and update to latest revision
103 $ hg pull default
103 $ hg pull default
104 pulling from $TESTTMP/server (glob)
104 pulling from $TESTTMP/server (glob)
105 requesting all changes
105 requesting all changes
106 adding changesets
106 adding changesets
107 adding manifests
107 adding manifests
108 adding file changes
108 adding file changes
109 added 7 changesets with 7 changes to 7 files
109 added 7 changesets with 7 changes to 7 files
110 (run 'hg update' to get a working copy)
110 (run 'hg update' to get a working copy)
111 $ hg update
111 $ hg update
112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
113
113
114 # TEST: ensure the revision data is consistent
114 # TEST: ensure the revision data is consistent
115 $ hg cat noop
115 $ hg cat noop
116 [NOOP]
116 [NOOP]
117 $ hg debugdata noop 0
117 $ hg debugdata noop 0
118 [NOOP]
118 [NOOP]
119
119
120 $ hg cat -r . base64
120 $ hg cat -r . base64
121 [BASE64]
121 [BASE64]
122 $ hg debugdata base64 0
122 $ hg debugdata base64 0
123 W0JBU0U2NF0K (no-eol)
123 W0JBU0U2NF0K (no-eol)
124
124
125 $ hg cat -r . gzip
125 $ hg cat -r . gzip
126 [GZIP]
126 [GZIP]
127 $ hg debugdata gzip 0
127 $ hg debugdata gzip 0
128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
129
129
130 $ hg cat -r . noop-base64
130 $ hg cat -r . noop-base64
131 [NOOP][BASE64]
131 [NOOP][BASE64]
132 $ hg debugdata noop-base64 0
132 $ hg debugdata noop-base64 0
133 W05PT1BdW0JBU0U2NF0K (no-eol)
133 W05PT1BdW0JBU0U2NF0K (no-eol)
134
134
135 $ hg cat -r . noop-gzip
135 $ hg cat -r . noop-gzip
136 [NOOP][GZIP]
136 [NOOP][GZIP]
137 $ hg debugdata noop-gzip 0
137 $ hg debugdata noop-gzip 0
138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
139
139
140 $ hg cat -r . base64-gzip
140 $ hg cat -r . base64-gzip
141 [BASE64][GZIP]
141 [BASE64][GZIP]
142 $ hg debugdata base64-gzip 0
142 $ hg debugdata base64-gzip 0
143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
144
144
145 $ hg cat -r . base64-gzip-noop
145 $ hg cat -r . base64-gzip-noop
146 [BASE64][GZIP][NOOP]
146 [BASE64][GZIP][NOOP]
147 $ hg debugdata base64-gzip-noop 0
147 $ hg debugdata base64-gzip-noop 0
148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
149
149
150 # TEST: ensure a missing processor is handled
150 # TEST: ensure a missing processor is handled
151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
152 $ hg commit -Aqm 'fail+base64+gzip+noop'
152 $ hg commit -Aqm 'fail+base64+gzip+noop'
153 abort: missing processor for flag '0x1'!
153 abort: missing processor for flag '0x1'!
154 [255]
154 [255]
155
155
156 # TEST: ensure we cannot register several flag processors on the same flag
156 # TEST: ensure we cannot register several flag processors on the same flag
157 $ cat >> .hg/hgrc << EOF
157 $ cat >> .hg/hgrc << EOF
158 > [extensions]
158 > [extensions]
159 > extension=$TESTDIR/flagprocessorext.py
159 > extension=$TESTDIR/flagprocessorext.py
160 > duplicate=$TESTDIR/flagprocessorext.py
160 > duplicate=$TESTDIR/flagprocessorext.py
161 > EOF
161 > EOF
162 $ echo 'this should fail' > file
162 $ echo 'this should fail' > file
163 $ hg commit -Aqm 'add file'
163 $ hg commit -Aqm 'add file'
164 abort: cannot register multiple processors on flag '0x8'.
164 abort: cannot register multiple processors on flag '0x8'.
165 [255]
165 [255]
166
166
167 $ cd ..
167 $ cd ..
168
168
169 # TEST: bundle repo
169 # TEST: bundle repo
170 $ hg init bundletest
170 $ hg init bundletest
171 $ cd bundletest
171 $ cd bundletest
172
172
173 $ cat >> .hg/hgrc << EOF
173 $ cat >> .hg/hgrc << EOF
174 > [extensions]
174 > [extensions]
175 > flagprocessor=$TESTDIR/flagprocessorext.py
175 > flagprocessor=$TESTDIR/flagprocessorext.py
176 > EOF
176 > EOF
177
177
178 $ for i in 0 single two three 4; do
178 $ for i in 0 single two three 4; do
179 > echo '[BASE64]a-bit-longer-'$i > base64
179 > echo '[BASE64]a-bit-longer-'$i > base64
180 > hg commit -m base64-$i -A base64
180 > hg commit -m base64-$i -A base64
181 > done
181 > done
182
182
183 $ hg update 2 -q
183 $ hg update 2 -q
184 $ echo '[BASE64]a-bit-longer-branching' > base64
184 $ echo '[BASE64]a-bit-longer-branching' > base64
185 $ hg commit -q -m branching
185 $ hg commit -q -m branching
186
186
187 $ hg bundle --base 1 bundle.hg
187 $ hg bundle --base 1 bundle.hg
188 4 changesets found
188 4 changesets found
189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
191 Traceback (most recent call last):
191 abort: integrity check failed on data/base64.i:2!
192 mercurial.mpatch.mpatchError: invalid patch
193
192
194 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
193 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
195 Traceback (most recent call last):
194 Traceback (most recent call last):
196 TypeError: Incorrect padding
195 mercurial.mpatch.mpatchError: invalid patch
197 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
196 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
198 abort: repository bundle-again.hg not found!
197 abort: repository bundle-again.hg not found!