bundlerepo: use raw revision in revdiff()...
Jun Wu
r31837:37e79391 default
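The patch below makes bundlerevlog.revdiff() compute deltas from the raw stored revisions (revision(rev, raw=True)) rather than from the flag-processed text, so a delta produced while rebundling from a bundle repository describes the same bytes a changegroup actually carries. As a rough, self-contained illustration of that raw-versus-processed distinction (plain Python only, with base64 standing in for the '[BASE64]' flag processor exercised by the test at the end of this change; none of this is Mercurial API):

import base64

# Stand-ins for a flag processor: the revlog stores the encoded ("raw") form,
# and revision() without raw=True returns the decoded, user-visible form.
def tostored(text):
    return base64.b64encode(text)

def fromstored(rawtext):
    return base64.b64decode(rawtext)

raw_base = tostored(b'[BASE64]a-bit-longer-0\n')      # raw text of the delta base
raw_tip = tostored(b'[BASE64]a-bit-longer-single\n')  # raw text of the child revision

# Deltas exchanged in changegroups are expressed between raw texts, so applying
# the delta chain to raw_base must reproduce raw_tip, which decodes cleanly:
assert fromstored(raw_tip) == b'[BASE64]a-bit-longer-single\n'

# Diffing the decoded revisions instead (the old revdiff() behaviour) yields a
# delta whose result is decoded text.  Stored where raw text is expected, it
# fails the next time the rebuilt bundle is read back and decoded:
try:
    fromstored(b'[BASE64]a-bit-longer-single\n')         # not valid base64
except Exception as exc:
    print('reading the rebuilt bundle fails: %s' % exc)  # cf. "Incorrect padding" in the old test output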
@@ -1,557 +1,558 @@
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17 import shutil
17 import shutil
18 import tempfile
18 import tempfile
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import nullid
21 from .node import nullid
22
22
23 from . import (
23 from . import (
24 bundle2,
24 bundle2,
25 changegroup,
25 changegroup,
26 changelog,
26 changelog,
27 cmdutil,
27 cmdutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 filelog,
31 filelog,
32 localrepo,
32 localrepo,
33 manifest,
33 manifest,
34 mdiff,
34 mdiff,
35 node as nodemod,
35 node as nodemod,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 revlog,
39 revlog,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43
43
44 class bundlerevlog(revlog.revlog):
44 class bundlerevlog(revlog.revlog):
45 def __init__(self, opener, indexfile, bundle, linkmapper):
45 def __init__(self, opener, indexfile, bundle, linkmapper):
46 # How it works:
46 # How it works:
47 # To retrieve a revision, we need to know the offset of the revision in
47 # To retrieve a revision, we need to know the offset of the revision in
48 # the bundle (an unbundle object). We store this offset in the index
48 # the bundle (an unbundle object). We store this offset in the index
49 # (start). The base of the delta is stored in the base field.
49 # (start). The base of the delta is stored in the base field.
50 #
50 #
51 # To differentiate a rev in the bundle from a rev in the revlog, we
51 # To differentiate a rev in the bundle from a rev in the revlog, we
52 # check revision against repotiprev.
52 # check revision against repotiprev.
53 opener = vfsmod.readonlyvfs(opener)
53 opener = vfsmod.readonlyvfs(opener)
54 revlog.revlog.__init__(self, opener, indexfile)
54 revlog.revlog.__init__(self, opener, indexfile)
55 self.bundle = bundle
55 self.bundle = bundle
56 n = len(self)
56 n = len(self)
57 self.repotiprev = n - 1
57 self.repotiprev = n - 1
58 chain = None
58 chain = None
59 self.bundlerevs = set() # used by 'bundle()' revset expression
59 self.bundlerevs = set() # used by 'bundle()' revset expression
60 getchunk = lambda: bundle.deltachunk(chain)
60 getchunk = lambda: bundle.deltachunk(chain)
61 for chunkdata in iter(getchunk, {}):
61 for chunkdata in iter(getchunk, {}):
62 node = chunkdata['node']
62 node = chunkdata['node']
63 p1 = chunkdata['p1']
63 p1 = chunkdata['p1']
64 p2 = chunkdata['p2']
64 p2 = chunkdata['p2']
65 cs = chunkdata['cs']
65 cs = chunkdata['cs']
66 deltabase = chunkdata['deltabase']
66 deltabase = chunkdata['deltabase']
67 delta = chunkdata['delta']
67 delta = chunkdata['delta']
68 flags = chunkdata['flags']
68 flags = chunkdata['flags']
69
69
70 size = len(delta)
70 size = len(delta)
71 start = bundle.tell() - size
71 start = bundle.tell() - size
72
72
73 link = linkmapper(cs)
73 link = linkmapper(cs)
74 if node in self.nodemap:
74 if node in self.nodemap:
75 # this can happen if two branches make the same change
75 # this can happen if two branches make the same change
76 chain = node
76 chain = node
77 self.bundlerevs.add(self.nodemap[node])
77 self.bundlerevs.add(self.nodemap[node])
78 continue
78 continue
79
79
80 for p in (p1, p2):
80 for p in (p1, p2):
81 if p not in self.nodemap:
81 if p not in self.nodemap:
82 raise error.LookupError(p, self.indexfile,
82 raise error.LookupError(p, self.indexfile,
83 _("unknown parent"))
83 _("unknown parent"))
84
84
85 if deltabase not in self.nodemap:
85 if deltabase not in self.nodemap:
86 raise LookupError(deltabase, self.indexfile,
86 raise LookupError(deltabase, self.indexfile,
87 _('unknown delta base'))
87 _('unknown delta base'))
88
88
89 baserev = self.rev(deltabase)
89 baserev = self.rev(deltabase)
90 # start, size, full unc. size, base (unused), link, p1, p2, node
90 # start, size, full unc. size, base (unused), link, p1, p2, node
91 e = (revlog.offset_type(start, flags), size, -1, baserev, link,
91 e = (revlog.offset_type(start, flags), size, -1, baserev, link,
92 self.rev(p1), self.rev(p2), node)
92 self.rev(p1), self.rev(p2), node)
93 self.index.insert(-1, e)
93 self.index.insert(-1, e)
94 self.nodemap[node] = n
94 self.nodemap[node] = n
95 self.bundlerevs.add(n)
95 self.bundlerevs.add(n)
96 chain = node
96 chain = node
97 n += 1
97 n += 1
98
98
99 def _chunk(self, rev):
99 def _chunk(self, rev):
100 # Warning: in case of bundle, the diff is against what we stored as
100 # Warning: in case of bundle, the diff is against what we stored as
101 # delta base, not against rev - 1
101 # delta base, not against rev - 1
102 # XXX: could use some caching
102 # XXX: could use some caching
103 if rev <= self.repotiprev:
103 if rev <= self.repotiprev:
104 return revlog.revlog._chunk(self, rev)
104 return revlog.revlog._chunk(self, rev)
105 self.bundle.seek(self.start(rev))
105 self.bundle.seek(self.start(rev))
106 return self.bundle.read(self.length(rev))
106 return self.bundle.read(self.length(rev))
107
107
108 def revdiff(self, rev1, rev2):
108 def revdiff(self, rev1, rev2):
109 """return or calculate a delta between two revisions"""
109 """return or calculate a delta between two revisions"""
110 if rev1 > self.repotiprev and rev2 > self.repotiprev:
110 if rev1 > self.repotiprev and rev2 > self.repotiprev:
111 # hot path for bundle
111 # hot path for bundle
112 revb = self.index[rev2][3]
112 revb = self.index[rev2][3]
113 if revb == rev1:
113 if revb == rev1:
114 return self._chunk(rev2)
114 return self._chunk(rev2)
115 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
115 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
116 return revlog.revlog.revdiff(self, rev1, rev2)
116 return revlog.revlog.revdiff(self, rev1, rev2)
117
117
- 118 return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
+ 118 return mdiff.textdiff(self.revision(rev1, raw=True),
+ 119 self.revision(rev2, raw=True))
119
120
120 def revision(self, nodeorrev, raw=False):
121 def revision(self, nodeorrev, raw=False):
121 """return an uncompressed revision of a given node or revision
122 """return an uncompressed revision of a given node or revision
122 number.
123 number.
123 """
124 """
124 if isinstance(nodeorrev, int):
125 if isinstance(nodeorrev, int):
125 rev = nodeorrev
126 rev = nodeorrev
126 node = self.node(rev)
127 node = self.node(rev)
127 else:
128 else:
128 node = nodeorrev
129 node = nodeorrev
129 rev = self.rev(node)
130 rev = self.rev(node)
130
131
131 if node == nullid:
132 if node == nullid:
132 return ""
133 return ""
133
134
134 rawtext = None
135 rawtext = None
135 chain = []
136 chain = []
136 iterrev = rev
137 iterrev = rev
137 # reconstruct the revision if it is from a changegroup
138 # reconstruct the revision if it is from a changegroup
138 while iterrev > self.repotiprev:
139 while iterrev > self.repotiprev:
139 if self._cache and self._cache[1] == iterrev:
140 if self._cache and self._cache[1] == iterrev:
140 rawtext = self._cache[2]
141 rawtext = self._cache[2]
141 break
142 break
142 chain.append(iterrev)
143 chain.append(iterrev)
143 iterrev = self.index[iterrev][3]
144 iterrev = self.index[iterrev][3]
144 if rawtext is None:
145 if rawtext is None:
145 rawtext = self.baserevision(iterrev)
146 rawtext = self.baserevision(iterrev)
146
147
147 while chain:
148 while chain:
148 delta = self._chunk(chain.pop())
149 delta = self._chunk(chain.pop())
149 rawtext = mdiff.patches(rawtext, [delta])
150 rawtext = mdiff.patches(rawtext, [delta])
150
151
151 text, validatehash = self._processflags(rawtext, self.flags(rev),
152 text, validatehash = self._processflags(rawtext, self.flags(rev),
152 'read', raw=raw)
153 'read', raw=raw)
153 if validatehash:
154 if validatehash:
154 self.checkhash(text, node, rev=rev)
155 self.checkhash(text, node, rev=rev)
155 self._cache = (node, rev, rawtext)
156 self._cache = (node, rev, rawtext)
156 return text
157 return text
157
158
158 def baserevision(self, nodeorrev):
159 def baserevision(self, nodeorrev):
159 # Revlog subclasses may override 'revision' method to modify format of
160 # Revlog subclasses may override 'revision' method to modify format of
160 # content retrieved from revlog. To use bundlerevlog with such class one
161 # content retrieved from revlog. To use bundlerevlog with such class one
161 # needs to override 'baserevision' and make more specific call here.
162 # needs to override 'baserevision' and make more specific call here.
162 return revlog.revlog.revision(self, nodeorrev, raw=True)
163 return revlog.revlog.revision(self, nodeorrev, raw=True)
163
164
164 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
165 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
165 raise NotImplementedError
166 raise NotImplementedError
166 def addgroup(self, revs, linkmapper, transaction):
167 def addgroup(self, revs, linkmapper, transaction):
167 raise NotImplementedError
168 raise NotImplementedError
168 def strip(self, rev, minlink):
169 def strip(self, rev, minlink):
169 raise NotImplementedError
170 raise NotImplementedError
170 def checksize(self):
171 def checksize(self):
171 raise NotImplementedError
172 raise NotImplementedError
172
173
173 class bundlechangelog(bundlerevlog, changelog.changelog):
174 class bundlechangelog(bundlerevlog, changelog.changelog):
174 def __init__(self, opener, bundle):
175 def __init__(self, opener, bundle):
175 changelog.changelog.__init__(self, opener)
176 changelog.changelog.__init__(self, opener)
176 linkmapper = lambda x: x
177 linkmapper = lambda x: x
177 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
178 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
178 linkmapper)
179 linkmapper)
179
180
180 def baserevision(self, nodeorrev):
181 def baserevision(self, nodeorrev):
181 # Although changelog doesn't override 'revision' method, some extensions
182 # Although changelog doesn't override 'revision' method, some extensions
182 # may replace this class with another that does. Same story with
183 # may replace this class with another that does. Same story with
183 # manifest and filelog classes.
184 # manifest and filelog classes.
184
185
185 # This bypasses filtering on changelog.node() and rev() because we need
186 # This bypasses filtering on changelog.node() and rev() because we need
186 # revision text of the bundle base even if it is hidden.
187 # revision text of the bundle base even if it is hidden.
187 oldfilter = self.filteredrevs
188 oldfilter = self.filteredrevs
188 try:
189 try:
189 self.filteredrevs = ()
190 self.filteredrevs = ()
190 return changelog.changelog.revision(self, nodeorrev, raw=True)
191 return changelog.changelog.revision(self, nodeorrev, raw=True)
191 finally:
192 finally:
192 self.filteredrevs = oldfilter
193 self.filteredrevs = oldfilter
193
194
194 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
195 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
195 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
196 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
196 manifest.manifestrevlog.__init__(self, opener, dir=dir)
197 manifest.manifestrevlog.__init__(self, opener, dir=dir)
197 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
198 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
198 linkmapper)
199 linkmapper)
199 if dirlogstarts is None:
200 if dirlogstarts is None:
200 dirlogstarts = {}
201 dirlogstarts = {}
201 if self.bundle.version == "03":
202 if self.bundle.version == "03":
202 dirlogstarts = _getfilestarts(self.bundle)
203 dirlogstarts = _getfilestarts(self.bundle)
203 self._dirlogstarts = dirlogstarts
204 self._dirlogstarts = dirlogstarts
204 self._linkmapper = linkmapper
205 self._linkmapper = linkmapper
205
206
206 def baserevision(self, nodeorrev):
207 def baserevision(self, nodeorrev):
207 node = nodeorrev
208 node = nodeorrev
208 if isinstance(node, int):
209 if isinstance(node, int):
209 node = self.node(node)
210 node = self.node(node)
210
211
211 if node in self.fulltextcache:
212 if node in self.fulltextcache:
212 result = '%s' % self.fulltextcache[node]
213 result = '%s' % self.fulltextcache[node]
213 else:
214 else:
214 result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
215 result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
215 return result
216 return result
216
217
217 def dirlog(self, d):
218 def dirlog(self, d):
218 if d in self._dirlogstarts:
219 if d in self._dirlogstarts:
219 self.bundle.seek(self._dirlogstarts[d])
220 self.bundle.seek(self._dirlogstarts[d])
220 return bundlemanifest(
221 return bundlemanifest(
221 self.opener, self.bundle, self._linkmapper,
222 self.opener, self.bundle, self._linkmapper,
222 self._dirlogstarts, dir=d)
223 self._dirlogstarts, dir=d)
223 return super(bundlemanifest, self).dirlog(d)
224 return super(bundlemanifest, self).dirlog(d)
224
225
225 class bundlefilelog(bundlerevlog, filelog.filelog):
226 class bundlefilelog(bundlerevlog, filelog.filelog):
226 def __init__(self, opener, path, bundle, linkmapper):
227 def __init__(self, opener, path, bundle, linkmapper):
227 filelog.filelog.__init__(self, opener, path)
228 filelog.filelog.__init__(self, opener, path)
228 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
229 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
229 linkmapper)
230 linkmapper)
230
231
231 def baserevision(self, nodeorrev):
232 def baserevision(self, nodeorrev):
232 return filelog.filelog.revision(self, nodeorrev, raw=True)
233 return filelog.filelog.revision(self, nodeorrev, raw=True)
233
234
234 class bundlepeer(localrepo.localpeer):
235 class bundlepeer(localrepo.localpeer):
235 def canpush(self):
236 def canpush(self):
236 return False
237 return False
237
238
238 class bundlephasecache(phases.phasecache):
239 class bundlephasecache(phases.phasecache):
239 def __init__(self, *args, **kwargs):
240 def __init__(self, *args, **kwargs):
240 super(bundlephasecache, self).__init__(*args, **kwargs)
241 super(bundlephasecache, self).__init__(*args, **kwargs)
241 if util.safehasattr(self, 'opener'):
242 if util.safehasattr(self, 'opener'):
242 self.opener = vfsmod.readonlyvfs(self.opener)
243 self.opener = vfsmod.readonlyvfs(self.opener)
243
244
244 def write(self):
245 def write(self):
245 raise NotImplementedError
246 raise NotImplementedError
246
247
247 def _write(self, fp):
248 def _write(self, fp):
248 raise NotImplementedError
249 raise NotImplementedError
249
250
250 def _updateroots(self, phase, newroots, tr):
251 def _updateroots(self, phase, newroots, tr):
251 self.phaseroots[phase] = newroots
252 self.phaseroots[phase] = newroots
252 self.invalidate()
253 self.invalidate()
253 self.dirty = True
254 self.dirty = True
254
255
255 def _getfilestarts(bundle):
256 def _getfilestarts(bundle):
256 bundlefilespos = {}
257 bundlefilespos = {}
257 for chunkdata in iter(bundle.filelogheader, {}):
258 for chunkdata in iter(bundle.filelogheader, {}):
258 fname = chunkdata['filename']
259 fname = chunkdata['filename']
259 bundlefilespos[fname] = bundle.tell()
260 bundlefilespos[fname] = bundle.tell()
260 for chunk in iter(lambda: bundle.deltachunk(None), {}):
261 for chunk in iter(lambda: bundle.deltachunk(None), {}):
261 pass
262 pass
262 return bundlefilespos
263 return bundlefilespos
263
264
264 class bundlerepository(localrepo.localrepository):
265 class bundlerepository(localrepo.localrepository):
265 def __init__(self, ui, path, bundlename):
266 def __init__(self, ui, path, bundlename):
266 def _writetempbundle(read, suffix, header=''):
267 def _writetempbundle(read, suffix, header=''):
267 """Write a temporary file to disk
268 """Write a temporary file to disk
268
269
269 This is closure because we need to make sure this tracked by
270 This is closure because we need to make sure this tracked by
270 self.tempfile for cleanup purposes."""
271 self.tempfile for cleanup purposes."""
271 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
272 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
272 suffix=".hg10un")
273 suffix=".hg10un")
273 self.tempfile = temp
274 self.tempfile = temp
274
275
275 with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
276 with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
276 fptemp.write(header)
277 fptemp.write(header)
277 while True:
278 while True:
278 chunk = read(2**18)
279 chunk = read(2**18)
279 if not chunk:
280 if not chunk:
280 break
281 break
281 fptemp.write(chunk)
282 fptemp.write(chunk)
282
283
283 return self.vfs.open(self.tempfile, mode="rb")
284 return self.vfs.open(self.tempfile, mode="rb")
284 self._tempparent = None
285 self._tempparent = None
285 try:
286 try:
286 localrepo.localrepository.__init__(self, ui, path)
287 localrepo.localrepository.__init__(self, ui, path)
287 except error.RepoError:
288 except error.RepoError:
288 self._tempparent = tempfile.mkdtemp()
289 self._tempparent = tempfile.mkdtemp()
289 localrepo.instance(ui, self._tempparent, 1)
290 localrepo.instance(ui, self._tempparent, 1)
290 localrepo.localrepository.__init__(self, ui, self._tempparent)
291 localrepo.localrepository.__init__(self, ui, self._tempparent)
291 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
292 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
292
293
293 if path:
294 if path:
294 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
295 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
295 else:
296 else:
296 self._url = 'bundle:' + bundlename
297 self._url = 'bundle:' + bundlename
297
298
298 self.tempfile = None
299 self.tempfile = None
299 f = util.posixfile(bundlename, "rb")
300 f = util.posixfile(bundlename, "rb")
300 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
301 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
301
302
302 if isinstance(self.bundle, bundle2.unbundle20):
303 if isinstance(self.bundle, bundle2.unbundle20):
303 cgstream = None
304 cgstream = None
304 for part in self.bundle.iterparts():
305 for part in self.bundle.iterparts():
305 if part.type == 'changegroup':
306 if part.type == 'changegroup':
306 if cgstream is not None:
307 if cgstream is not None:
307 raise NotImplementedError("can't process "
308 raise NotImplementedError("can't process "
308 "multiple changegroups")
309 "multiple changegroups")
309 cgstream = part
310 cgstream = part
310 version = part.params.get('version', '01')
311 version = part.params.get('version', '01')
311 legalcgvers = changegroup.supportedincomingversions(self)
312 legalcgvers = changegroup.supportedincomingversions(self)
312 if version not in legalcgvers:
313 if version not in legalcgvers:
313 msg = _('Unsupported changegroup version: %s')
314 msg = _('Unsupported changegroup version: %s')
314 raise error.Abort(msg % version)
315 raise error.Abort(msg % version)
315 if self.bundle.compressed():
316 if self.bundle.compressed():
316 cgstream = _writetempbundle(part.read,
317 cgstream = _writetempbundle(part.read,
317 ".cg%sun" % version)
318 ".cg%sun" % version)
318
319
319 if cgstream is None:
320 if cgstream is None:
320 raise error.Abort(_('No changegroups found'))
321 raise error.Abort(_('No changegroups found'))
321 cgstream.seek(0)
322 cgstream.seek(0)
322
323
323 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
324 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
324
325
325 elif self.bundle.compressed():
326 elif self.bundle.compressed():
326 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
327 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
327 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
328 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
328 bundlename,
329 bundlename,
329 self.vfs)
330 self.vfs)
330
331
331 # dict with the mapping 'filename' -> position in the bundle
332 # dict with the mapping 'filename' -> position in the bundle
332 self.bundlefilespos = {}
333 self.bundlefilespos = {}
333
334
334 self.firstnewrev = self.changelog.repotiprev + 1
335 self.firstnewrev = self.changelog.repotiprev + 1
335 phases.retractboundary(self, None, phases.draft,
336 phases.retractboundary(self, None, phases.draft,
336 [ctx.node() for ctx in self[self.firstnewrev:]])
337 [ctx.node() for ctx in self[self.firstnewrev:]])
337
338
338 @localrepo.unfilteredpropertycache
339 @localrepo.unfilteredpropertycache
339 def _phasecache(self):
340 def _phasecache(self):
340 return bundlephasecache(self, self._phasedefaults)
341 return bundlephasecache(self, self._phasedefaults)
341
342
342 @localrepo.unfilteredpropertycache
343 @localrepo.unfilteredpropertycache
343 def changelog(self):
344 def changelog(self):
344 # consume the header if it exists
345 # consume the header if it exists
345 self.bundle.changelogheader()
346 self.bundle.changelogheader()
346 c = bundlechangelog(self.svfs, self.bundle)
347 c = bundlechangelog(self.svfs, self.bundle)
347 self.manstart = self.bundle.tell()
348 self.manstart = self.bundle.tell()
348 return c
349 return c
349
350
350 def _constructmanifest(self):
351 def _constructmanifest(self):
351 self.bundle.seek(self.manstart)
352 self.bundle.seek(self.manstart)
352 # consume the header if it exists
353 # consume the header if it exists
353 self.bundle.manifestheader()
354 self.bundle.manifestheader()
354 linkmapper = self.unfiltered().changelog.rev
355 linkmapper = self.unfiltered().changelog.rev
355 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
356 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
356 self.filestart = self.bundle.tell()
357 self.filestart = self.bundle.tell()
357 return m
358 return m
358
359
359 @localrepo.unfilteredpropertycache
360 @localrepo.unfilteredpropertycache
360 def manstart(self):
361 def manstart(self):
361 self.changelog
362 self.changelog
362 return self.manstart
363 return self.manstart
363
364
364 @localrepo.unfilteredpropertycache
365 @localrepo.unfilteredpropertycache
365 def filestart(self):
366 def filestart(self):
366 self.manifestlog
367 self.manifestlog
367 return self.filestart
368 return self.filestart
368
369
369 def url(self):
370 def url(self):
370 return self._url
371 return self._url
371
372
372 def file(self, f):
373 def file(self, f):
373 if not self.bundlefilespos:
374 if not self.bundlefilespos:
374 self.bundle.seek(self.filestart)
375 self.bundle.seek(self.filestart)
375 self.bundlefilespos = _getfilestarts(self.bundle)
376 self.bundlefilespos = _getfilestarts(self.bundle)
376
377
377 if f in self.bundlefilespos:
378 if f in self.bundlefilespos:
378 self.bundle.seek(self.bundlefilespos[f])
379 self.bundle.seek(self.bundlefilespos[f])
379 linkmapper = self.unfiltered().changelog.rev
380 linkmapper = self.unfiltered().changelog.rev
380 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
381 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
381 else:
382 else:
382 return filelog.filelog(self.svfs, f)
383 return filelog.filelog(self.svfs, f)
383
384
384 def close(self):
385 def close(self):
385 """Close assigned bundle file immediately."""
386 """Close assigned bundle file immediately."""
386 self.bundlefile.close()
387 self.bundlefile.close()
387 if self.tempfile is not None:
388 if self.tempfile is not None:
388 self.vfs.unlink(self.tempfile)
389 self.vfs.unlink(self.tempfile)
389 if self._tempparent:
390 if self._tempparent:
390 shutil.rmtree(self._tempparent, True)
391 shutil.rmtree(self._tempparent, True)
391
392
392 def cancopy(self):
393 def cancopy(self):
393 return False
394 return False
394
395
395 def peer(self):
396 def peer(self):
396 return bundlepeer(self)
397 return bundlepeer(self)
397
398
398 def getcwd(self):
399 def getcwd(self):
399 return pycompat.getcwd() # always outside the repo
400 return pycompat.getcwd() # always outside the repo
400
401
401 # Check if parents exist in localrepo before setting
402 # Check if parents exist in localrepo before setting
402 def setparents(self, p1, p2=nullid):
403 def setparents(self, p1, p2=nullid):
403 p1rev = self.changelog.rev(p1)
404 p1rev = self.changelog.rev(p1)
404 p2rev = self.changelog.rev(p2)
405 p2rev = self.changelog.rev(p2)
405 msg = _("setting parent to node %s that only exists in the bundle\n")
406 msg = _("setting parent to node %s that only exists in the bundle\n")
406 if self.changelog.repotiprev < p1rev:
407 if self.changelog.repotiprev < p1rev:
407 self.ui.warn(msg % nodemod.hex(p1))
408 self.ui.warn(msg % nodemod.hex(p1))
408 if self.changelog.repotiprev < p2rev:
409 if self.changelog.repotiprev < p2rev:
409 self.ui.warn(msg % nodemod.hex(p2))
410 self.ui.warn(msg % nodemod.hex(p2))
410 return super(bundlerepository, self).setparents(p1, p2)
411 return super(bundlerepository, self).setparents(p1, p2)
411
412
412 def instance(ui, path, create):
413 def instance(ui, path, create):
413 if create:
414 if create:
414 raise error.Abort(_('cannot create new bundle repository'))
415 raise error.Abort(_('cannot create new bundle repository'))
415 # internal config: bundle.mainreporoot
416 # internal config: bundle.mainreporoot
416 parentpath = ui.config("bundle", "mainreporoot", "")
417 parentpath = ui.config("bundle", "mainreporoot", "")
417 if not parentpath:
418 if not parentpath:
418 # try to find the correct path to the working directory repo
419 # try to find the correct path to the working directory repo
419 parentpath = cmdutil.findrepo(pycompat.getcwd())
420 parentpath = cmdutil.findrepo(pycompat.getcwd())
420 if parentpath is None:
421 if parentpath is None:
421 parentpath = ''
422 parentpath = ''
422 if parentpath:
423 if parentpath:
423 # Try to make the full path relative so we get a nice, short URL.
424 # Try to make the full path relative so we get a nice, short URL.
424 # In particular, we don't want temp dir names in test outputs.
425 # In particular, we don't want temp dir names in test outputs.
425 cwd = pycompat.getcwd()
426 cwd = pycompat.getcwd()
426 if parentpath == cwd:
427 if parentpath == cwd:
427 parentpath = ''
428 parentpath = ''
428 else:
429 else:
429 cwd = pathutil.normasprefix(cwd)
430 cwd = pathutil.normasprefix(cwd)
430 if parentpath.startswith(cwd):
431 if parentpath.startswith(cwd):
431 parentpath = parentpath[len(cwd):]
432 parentpath = parentpath[len(cwd):]
432 u = util.url(path)
433 u = util.url(path)
433 path = u.localpath()
434 path = u.localpath()
434 if u.scheme == 'bundle':
435 if u.scheme == 'bundle':
435 s = path.split("+", 1)
436 s = path.split("+", 1)
436 if len(s) == 1:
437 if len(s) == 1:
437 repopath, bundlename = parentpath, s[0]
438 repopath, bundlename = parentpath, s[0]
438 else:
439 else:
439 repopath, bundlename = s
440 repopath, bundlename = s
440 else:
441 else:
441 repopath, bundlename = parentpath, path
442 repopath, bundlename = parentpath, path
442 return bundlerepository(ui, repopath, bundlename)
443 return bundlerepository(ui, repopath, bundlename)
443
444
444 class bundletransactionmanager(object):
445 class bundletransactionmanager(object):
445 def transaction(self):
446 def transaction(self):
446 return None
447 return None
447
448
448 def close(self):
449 def close(self):
449 raise NotImplementedError
450 raise NotImplementedError
450
451
451 def release(self):
452 def release(self):
452 raise NotImplementedError
453 raise NotImplementedError
453
454
454 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
455 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
455 force=False):
456 force=False):
456 '''obtains a bundle of changes incoming from other
457 '''obtains a bundle of changes incoming from other
457
458
458 "onlyheads" restricts the returned changes to those reachable from the
459 "onlyheads" restricts the returned changes to those reachable from the
459 specified heads.
460 specified heads.
460 "bundlename", if given, stores the bundle to this file path permanently;
461 "bundlename", if given, stores the bundle to this file path permanently;
461 otherwise it's stored to a temp file and gets deleted again when you call
462 otherwise it's stored to a temp file and gets deleted again when you call
462 the returned "cleanupfn".
463 the returned "cleanupfn".
463 "force" indicates whether to proceed on unrelated repos.
464 "force" indicates whether to proceed on unrelated repos.
464
465
465 Returns a tuple (local, csets, cleanupfn):
466 Returns a tuple (local, csets, cleanupfn):
466
467
467 "local" is a local repo from which to obtain the actual incoming
468 "local" is a local repo from which to obtain the actual incoming
468 changesets; it is a bundlerepo for the obtained bundle when the
469 changesets; it is a bundlerepo for the obtained bundle when the
469 original "other" is remote.
470 original "other" is remote.
470 "csets" lists the incoming changeset node ids.
471 "csets" lists the incoming changeset node ids.
471 "cleanupfn" must be called without arguments when you're done processing
472 "cleanupfn" must be called without arguments when you're done processing
472 the changes; it closes both the original "other" and the one returned
473 the changes; it closes both the original "other" and the one returned
473 here.
474 here.
474 '''
475 '''
475 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
476 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
476 force=force)
477 force=force)
477 common, incoming, rheads = tmp
478 common, incoming, rheads = tmp
478 if not incoming:
479 if not incoming:
479 try:
480 try:
480 if bundlename:
481 if bundlename:
481 os.unlink(bundlename)
482 os.unlink(bundlename)
482 except OSError:
483 except OSError:
483 pass
484 pass
484 return repo, [], other.close
485 return repo, [], other.close
485
486
486 commonset = set(common)
487 commonset = set(common)
487 rheads = [x for x in rheads if x not in commonset]
488 rheads = [x for x in rheads if x not in commonset]
488
489
489 bundle = None
490 bundle = None
490 bundlerepo = None
491 bundlerepo = None
491 localrepo = other.local()
492 localrepo = other.local()
492 if bundlename or not localrepo:
493 if bundlename or not localrepo:
493 # create a bundle (uncompressed if other repo is not local)
494 # create a bundle (uncompressed if other repo is not local)
494
495
495 # developer config: devel.legacy.exchange
496 # developer config: devel.legacy.exchange
496 legexc = ui.configlist('devel', 'legacy.exchange')
497 legexc = ui.configlist('devel', 'legacy.exchange')
497 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
498 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
498 canbundle2 = (not forcebundle1
499 canbundle2 = (not forcebundle1
499 and other.capable('getbundle')
500 and other.capable('getbundle')
500 and other.capable('bundle2'))
501 and other.capable('bundle2'))
501 if canbundle2:
502 if canbundle2:
502 kwargs = {}
503 kwargs = {}
503 kwargs['common'] = common
504 kwargs['common'] = common
504 kwargs['heads'] = rheads
505 kwargs['heads'] = rheads
505 kwargs['bundlecaps'] = exchange.caps20to10(repo)
506 kwargs['bundlecaps'] = exchange.caps20to10(repo)
506 kwargs['cg'] = True
507 kwargs['cg'] = True
507 b2 = other.getbundle('incoming', **kwargs)
508 b2 = other.getbundle('incoming', **kwargs)
508 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
509 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
509 bundlename)
510 bundlename)
510 else:
511 else:
511 if other.capable('getbundle'):
512 if other.capable('getbundle'):
512 cg = other.getbundle('incoming', common=common, heads=rheads)
513 cg = other.getbundle('incoming', common=common, heads=rheads)
513 elif onlyheads is None and not other.capable('changegroupsubset'):
514 elif onlyheads is None and not other.capable('changegroupsubset'):
514 # compat with older servers when pulling all remote heads
515 # compat with older servers when pulling all remote heads
515 cg = other.changegroup(incoming, "incoming")
516 cg = other.changegroup(incoming, "incoming")
516 rheads = None
517 rheads = None
517 else:
518 else:
518 cg = other.changegroupsubset(incoming, rheads, 'incoming')
519 cg = other.changegroupsubset(incoming, rheads, 'incoming')
519 if localrepo:
520 if localrepo:
520 bundletype = "HG10BZ"
521 bundletype = "HG10BZ"
521 else:
522 else:
522 bundletype = "HG10UN"
523 bundletype = "HG10UN"
523 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
524 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
524 bundletype)
525 bundletype)
525 # keep written bundle?
526 # keep written bundle?
526 if bundlename:
527 if bundlename:
527 bundle = None
528 bundle = None
528 if not localrepo:
529 if not localrepo:
529 # use the created uncompressed bundlerepo
530 # use the created uncompressed bundlerepo
530 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
531 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
531 fname)
532 fname)
532 # this repo contains local and other now, so filter out local again
533 # this repo contains local and other now, so filter out local again
533 common = repo.heads()
534 common = repo.heads()
534 if localrepo:
535 if localrepo:
535 # Part of common may be remotely filtered
536 # Part of common may be remotely filtered
536 # So use an unfiltered version
537 # So use an unfiltered version
537 # The discovery process probably need cleanup to avoid that
538 # The discovery process probably need cleanup to avoid that
538 localrepo = localrepo.unfiltered()
539 localrepo = localrepo.unfiltered()
539
540
540 csets = localrepo.changelog.findmissing(common, rheads)
541 csets = localrepo.changelog.findmissing(common, rheads)
541
542
542 if bundlerepo:
543 if bundlerepo:
543 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
544 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
544 remotephases = other.listkeys('phases')
545 remotephases = other.listkeys('phases')
545
546
546 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
547 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
547 pullop.trmanager = bundletransactionmanager()
548 pullop.trmanager = bundletransactionmanager()
548 exchange._pullapplyphases(pullop, remotephases)
549 exchange._pullapplyphases(pullop, remotephases)
549
550
550 def cleanup():
551 def cleanup():
551 if bundlerepo:
552 if bundlerepo:
552 bundlerepo.close()
553 bundlerepo.close()
553 if bundle:
554 if bundle:
554 os.unlink(bundle)
555 os.unlink(bundle)
555 other.close()
556 other.close()
556
557
557 return (localrepo, csets, cleanup)
558 return (localrepo, csets, cleanup)
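For readability, here is the patched revdiff() again, condensed from the listing above and annotated with why each branch is now consistent (an annotated paraphrase written against the surrounding bundlerevlog class, not a drop-in replacement):

from mercurial import mdiff, revlog

def revdiff(self, rev1, rev2):
    """Return or calculate a delta between two revisions (annotated paraphrase)."""
    if rev1 > self.repotiprev and rev2 > self.repotiprev:
        # Both revisions come from the bundle.  If rev1 is rev2's recorded
        # delta base, the chunk stored in the bundle already is that delta,
        # expressed over raw text, so it can be returned as-is (hot path).
        if self.index[rev2][3] == rev1:
            return self._chunk(rev2)
    elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
        # Both revisions live in the underlying revlog; defer to it.
        return revlog.revlog.revdiff(self, rev1, rev2)

    # Mixed case, or no stored delta to reuse: recompute the delta from the
    # raw revision texts so it matches what the two paths above produce.
    return mdiff.textdiff(self.revision(rev1, raw=True),
                          self.revision(rev2, raw=True))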
@@ -1,220 +1,242 @@
1 # Create server
1 # Create server
2 $ hg init server
2 $ hg init server
3 $ cd server
3 $ cd server
4 $ cat >> .hg/hgrc << EOF
4 $ cat >> .hg/hgrc << EOF
5 > [extensions]
5 > [extensions]
6 > extension=$TESTDIR/flagprocessorext.py
6 > extension=$TESTDIR/flagprocessorext.py
7 > EOF
7 > EOF
8 $ cd ../
8 $ cd ../
9
9
10 # Clone server and enable extensions
10 # Clone server and enable extensions
11 $ hg clone -q server client
11 $ hg clone -q server client
12 $ cd client
12 $ cd client
13 $ cat >> .hg/hgrc << EOF
13 $ cat >> .hg/hgrc << EOF
14 > [extensions]
14 > [extensions]
15 > extension=$TESTDIR/flagprocessorext.py
15 > extension=$TESTDIR/flagprocessorext.py
16 > EOF
16 > EOF
17
17
18 # Commit file that will trigger the noop extension
18 # Commit file that will trigger the noop extension
19 $ echo '[NOOP]' > noop
19 $ echo '[NOOP]' > noop
20 $ hg commit -Aqm "noop"
20 $ hg commit -Aqm "noop"
21
21
22 # Commit file that will trigger the base64 extension
22 # Commit file that will trigger the base64 extension
23 $ echo '[BASE64]' > base64
23 $ echo '[BASE64]' > base64
24 $ hg commit -Aqm 'base64'
24 $ hg commit -Aqm 'base64'
25
25
26 # Commit file that will trigger the gzip extension
26 # Commit file that will trigger the gzip extension
27 $ echo '[GZIP]' > gzip
27 $ echo '[GZIP]' > gzip
28 $ hg commit -Aqm 'gzip'
28 $ hg commit -Aqm 'gzip'
29
29
30 # Commit file that will trigger noop and base64
30 # Commit file that will trigger noop and base64
31 $ echo '[NOOP][BASE64]' > noop-base64
31 $ echo '[NOOP][BASE64]' > noop-base64
32 $ hg commit -Aqm 'noop+base64'
32 $ hg commit -Aqm 'noop+base64'
33
33
34 # Commit file that will trigger noop and gzip
34 # Commit file that will trigger noop and gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
36 $ hg commit -Aqm 'noop+gzip'
36 $ hg commit -Aqm 'noop+gzip'
37
37
38 # Commit file that will trigger base64 and gzip
38 # Commit file that will trigger base64 and gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
40 $ hg commit -Aqm 'base64+gzip'
40 $ hg commit -Aqm 'base64+gzip'
41
41
42 # Commit file that will trigger base64, gzip and noop
42 # Commit file that will trigger base64, gzip and noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 $ hg commit -Aqm 'base64+gzip+noop'
44 $ hg commit -Aqm 'base64+gzip+noop'
45
45
46 # TEST: ensure the revision data is consistent
46 # TEST: ensure the revision data is consistent
47 $ hg cat noop
47 $ hg cat noop
48 [NOOP]
48 [NOOP]
49 $ hg debugdata noop 0
49 $ hg debugdata noop 0
50 [NOOP]
50 [NOOP]
51
51
52 $ hg cat -r . base64
52 $ hg cat -r . base64
53 [BASE64]
53 [BASE64]
54 $ hg debugdata base64 0
54 $ hg debugdata base64 0
55 W0JBU0U2NF0K (no-eol)
55 W0JBU0U2NF0K (no-eol)
56
56
57 $ hg cat -r . gzip
57 $ hg cat -r . gzip
58 [GZIP]
58 [GZIP]
59 $ hg debugdata gzip 0
59 $ hg debugdata gzip 0
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61
61
62 $ hg cat -r . noop-base64
62 $ hg cat -r . noop-base64
63 [NOOP][BASE64]
63 [NOOP][BASE64]
64 $ hg debugdata noop-base64 0
64 $ hg debugdata noop-base64 0
65 W05PT1BdW0JBU0U2NF0K (no-eol)
65 W05PT1BdW0JBU0U2NF0K (no-eol)
66
66
67 $ hg cat -r . noop-gzip
67 $ hg cat -r . noop-gzip
68 [NOOP][GZIP]
68 [NOOP][GZIP]
69 $ hg debugdata noop-gzip 0
69 $ hg debugdata noop-gzip 0
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71
71
72 $ hg cat -r . base64-gzip
72 $ hg cat -r . base64-gzip
73 [BASE64][GZIP]
73 [BASE64][GZIP]
74 $ hg debugdata base64-gzip 0
74 $ hg debugdata base64-gzip 0
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76
76
77 $ hg cat -r . base64-gzip-noop
77 $ hg cat -r . base64-gzip-noop
78 [BASE64][GZIP][NOOP]
78 [BASE64][GZIP][NOOP]
79 $ hg debugdata base64-gzip-noop 0
79 $ hg debugdata base64-gzip-noop 0
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81
81
82 # Push to the server
82 # Push to the server
83 $ hg push
83 $ hg push
84 pushing to $TESTTMP/server (glob)
84 pushing to $TESTTMP/server (glob)
85 searching for changes
85 searching for changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 7 changesets with 7 changes to 7 files
89 added 7 changesets with 7 changes to 7 files
90
90
91 # Initialize new client (not cloning) and setup extension
91 # Initialize new client (not cloning) and setup extension
92 $ cd ..
92 $ cd ..
93 $ hg init client2
93 $ hg init client2
94 $ cd client2
94 $ cd client2
95 $ cat >> .hg/hgrc << EOF
95 $ cat >> .hg/hgrc << EOF
96 > [paths]
96 > [paths]
97 > default = $TESTTMP/server
97 > default = $TESTTMP/server
98 > [extensions]
98 > [extensions]
99 > extension=$TESTDIR/flagprocessorext.py
99 > extension=$TESTDIR/flagprocessorext.py
100 > EOF
100 > EOF
101
101
102 # Pull from server and update to latest revision
102 # Pull from server and update to latest revision
103 $ hg pull default
103 $ hg pull default
104 pulling from $TESTTMP/server (glob)
104 pulling from $TESTTMP/server (glob)
105 requesting all changes
105 requesting all changes
106 adding changesets
106 adding changesets
107 adding manifests
107 adding manifests
108 adding file changes
108 adding file changes
109 added 7 changesets with 7 changes to 7 files
109 added 7 changesets with 7 changes to 7 files
110 (run 'hg update' to get a working copy)
110 (run 'hg update' to get a working copy)
111 $ hg update
111 $ hg update
112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
113
113
114 # TEST: ensure the revision data is consistent
114 # TEST: ensure the revision data is consistent
115 $ hg cat noop
115 $ hg cat noop
116 [NOOP]
116 [NOOP]
117 $ hg debugdata noop 0
117 $ hg debugdata noop 0
118 [NOOP]
118 [NOOP]
119
119
120 $ hg cat -r . base64
120 $ hg cat -r . base64
121 [BASE64]
121 [BASE64]
122 $ hg debugdata base64 0
122 $ hg debugdata base64 0
123 W0JBU0U2NF0K (no-eol)
123 W0JBU0U2NF0K (no-eol)
124
124
125 $ hg cat -r . gzip
125 $ hg cat -r . gzip
126 [GZIP]
126 [GZIP]
127 $ hg debugdata gzip 0
127 $ hg debugdata gzip 0
128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
129
129
130 $ hg cat -r . noop-base64
130 $ hg cat -r . noop-base64
131 [NOOP][BASE64]
131 [NOOP][BASE64]
132 $ hg debugdata noop-base64 0
132 $ hg debugdata noop-base64 0
133 W05PT1BdW0JBU0U2NF0K (no-eol)
133 W05PT1BdW0JBU0U2NF0K (no-eol)
134
134
135 $ hg cat -r . noop-gzip
135 $ hg cat -r . noop-gzip
136 [NOOP][GZIP]
136 [NOOP][GZIP]
137 $ hg debugdata noop-gzip 0
137 $ hg debugdata noop-gzip 0
138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
139
139
140 $ hg cat -r . base64-gzip
140 $ hg cat -r . base64-gzip
141 [BASE64][GZIP]
141 [BASE64][GZIP]
142 $ hg debugdata base64-gzip 0
142 $ hg debugdata base64-gzip 0
143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
144
144
145 $ hg cat -r . base64-gzip-noop
145 $ hg cat -r . base64-gzip-noop
146 [BASE64][GZIP][NOOP]
146 [BASE64][GZIP][NOOP]
147 $ hg debugdata base64-gzip-noop 0
147 $ hg debugdata base64-gzip-noop 0
148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
149
149
150 # TEST: ensure a missing processor is handled
150 # TEST: ensure a missing processor is handled
151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
152 $ hg commit -Aqm 'fail+base64+gzip+noop'
152 $ hg commit -Aqm 'fail+base64+gzip+noop'
153 abort: missing processor for flag '0x1'!
153 abort: missing processor for flag '0x1'!
154 [255]
154 [255]
155
155
156 # TEST: ensure we cannot register several flag processors on the same flag
156 # TEST: ensure we cannot register several flag processors on the same flag
157 $ cat >> .hg/hgrc << EOF
157 $ cat >> .hg/hgrc << EOF
158 > [extensions]
158 > [extensions]
159 > extension=$TESTDIR/flagprocessorext.py
159 > extension=$TESTDIR/flagprocessorext.py
160 > duplicate=$TESTDIR/flagprocessorext.py
160 > duplicate=$TESTDIR/flagprocessorext.py
161 > EOF
161 > EOF
162 $ echo 'this should fail' > file
162 $ echo 'this should fail' > file
163 $ hg commit -Aqm 'add file'
163 $ hg commit -Aqm 'add file'
164 abort: cannot register multiple processors on flag '0x8'.
164 abort: cannot register multiple processors on flag '0x8'.
165 [255]
165 [255]
166
166
167 $ cd ..
167 $ cd ..
168
168
169 # TEST: bundle repo
169 # TEST: bundle repo
170 $ hg init bundletest
170 $ hg init bundletest
171 $ cd bundletest
171 $ cd bundletest
172
172
173 $ cat >> .hg/hgrc << EOF
173 $ cat >> .hg/hgrc << EOF
174 > [extensions]
174 > [extensions]
175 > flagprocessor=$TESTDIR/flagprocessorext.py
175 > flagprocessor=$TESTDIR/flagprocessorext.py
176 > EOF
176 > EOF
177
177
178 $ for i in 0 single two three 4; do
178 $ for i in 0 single two three 4; do
179 > echo '[BASE64]a-bit-longer-'$i > base64
179 > echo '[BASE64]a-bit-longer-'$i > base64
180 > hg commit -m base64-$i -A base64
180 > hg commit -m base64-$i -A base64
181 > done
181 > done
182
182
183 $ hg update 2 -q
183 $ hg update 2 -q
184 $ echo '[BASE64]a-bit-longer-branching' > base64
184 $ echo '[BASE64]a-bit-longer-branching' > base64
185 $ hg commit -q -m branching
185 $ hg commit -q -m branching
186
186
187 $ hg bundle --base 1 bundle.hg
187 $ hg bundle --base 1 bundle.hg
188 4 changesets found
188 4 changesets found
189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
191 5 branching
191 5 branching
192 base64 | 2 +-
192 base64 | 2 +-
193 1 files changed, 1 insertions(+), 1 deletions(-)
193 1 files changed, 1 insertions(+), 1 deletions(-)
194
194
195 4 base64-4
195 4 base64-4
196 base64 | 2 +-
196 base64 | 2 +-
197 1 files changed, 1 insertions(+), 1 deletions(-)
197 1 files changed, 1 insertions(+), 1 deletions(-)
198
198
199 3 base64-three
199 3 base64-three
200 base64 | 2 +-
200 base64 | 2 +-
201 1 files changed, 1 insertions(+), 1 deletions(-)
201 1 files changed, 1 insertions(+), 1 deletions(-)
202
202
203 2 base64-two
203 2 base64-two
204 base64 | 2 +-
204 base64 | 2 +-
205 1 files changed, 1 insertions(+), 1 deletions(-)
205 1 files changed, 1 insertions(+), 1 deletions(-)
206
206
207 1 base64-single
207 1 base64-single
208 base64 | 2 +-
208 base64 | 2 +-
209 1 files changed, 1 insertions(+), 1 deletions(-)
209 1 files changed, 1 insertions(+), 1 deletions(-)
210
210
211 0 base64-0
211 0 base64-0
212 base64 | 1 +
212 base64 | 1 +
213 1 files changed, 1 insertions(+), 0 deletions(-)
213 1 files changed, 1 insertions(+), 0 deletions(-)
214
214
215
215
216 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
216 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
217 [1]
217 [1]
218 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
218 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
- 219 Traceback (most recent call last):
- 220 TypeError: Incorrect padding
+ 219 5 branching
+ 220 base64 | 2 +-
+ 221 1 files changed, 1 insertions(+), 1 deletions(-)
+ 222
+ 223 4 base64-4
+ 224 base64 | 2 +-
+ 225 1 files changed, 1 insertions(+), 1 deletions(-)
+ 226
+ 227 3 base64-three
+ 228 base64 | 2 +-
+ 229 1 files changed, 1 insertions(+), 1 deletions(-)
+ 230
+ 231 2 base64-two
+ 232 base64 | 2 +-
+ 233 1 files changed, 1 insertions(+), 1 deletions(-)
+ 234
+ 235 1 base64-single
+ 236 base64 | 2 +-
+ 237 1 files changed, 1 insertions(+), 1 deletions(-)
+ 238
+ 239 0 base64-0
+ 240 base64 | 1 +
+ 241 1 files changed, 1 insertions(+), 0 deletions(-)
+ 242
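The 'hg cat' / 'hg debugdata' pairs earlier in this test can be checked independently: 'cat' prints the flag-processed text while 'debugdata' prints the raw stored text, which for the '[BASE64]' processor is simply its base64 encoding. A quick sanity check in plain Python (not Mercurial API):

import base64

# 'hg cat' output is the processed text; 'hg debugdata' output is the raw
# stored text, i.e. the base64 encoding added by the [BASE64] flag processor.
assert base64.b64encode(b'[BASE64]\n') == b'W0JBU0U2NF0K'
assert base64.b64encode(b'[NOOP][BASE64]\n') == b'W05PT1BdW0JBU0U2NF0K'
print('debugdata output matches the base64-encoded file contents')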