##// END OF EJS Templates
addrevision: use general delta when the incoming base delta is bad...
Pierre-Yves David -
r27191:20a9226b default
parent child Browse files
Show More
@@ -1,1732 +1,1731
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 import collections
15 import collections
16 from node import bin, hex, nullid, nullrev
16 from node import bin, hex, nullid, nullrev
17 from i18n import _
17 from i18n import _
18 import ancestor, mdiff, parsers, error, util, templatefilters
18 import ancestor, mdiff, parsers, error, util, templatefilters
19 import struct, zlib, errno
19 import struct, zlib, errno
20
20
# struct/zlib/hash entry points are aliased at module level so the code
# below can use short local-style names
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags (stored in the first 4 bytes of the index file)
REVLOGV0 = 0
REVLOGNG = 1
# revision data is stored inline in the index file (.i) itself
REVLOGNGINLINEDATA = (1 << 16)
# deltas may be stored against an arbitrary base revision, not just rev - 1
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags (per-entry, low 16 bits of the packed offset field)
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_DEFAULT_FLAGS = 0
REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# re-export the error types raised by this module
RevlogError = error.RevlogError
LookupError = error.LookupError
CensoredNodeError = error.CensoredNodeError
def getoffset(q):
    """Extract the byte offset from a packed offset/flags field."""
    offset = q >> 16
    return int(offset)
52
52
def gettype(q):
    """Extract the type/flags (low 16 bits) from a packed field."""
    flags = q & 0xFFFF
    return int(flags)
55
55
def offset_type(offset, type):
    """Pack a byte offset and 16-bit type flags into one integer field."""
    # long() keeps the packed value from overflowing a 32-bit int on
    # Python 2 for large offsets
    return long(long(offset) << 16 | type)
58
58
# precomputed hash of the null node, copied below instead of rehashed
_nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # neither parent is null: hash them in sorted order so the
        # result is independent of parent ordering
        parents = sorted([p1, p2])
        s = _sha(parents[0])
        s.update(parents[1])
    s.update(text)
    return s.digest()
81
81
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # NUL marker: data was stored uncompressed with no prefix
        return bin
    if marker == 'u':
        # 'u' marker: stored uncompressed, strip the marker byte
        return bin[1:]
    if marker == 'x':
        # zlib stream (zlib output always starts with 'x')
        try:
            return _decompress(bin)
        except zlib.error as e:
            raise RevlogError(_("revlog decompress error: %s") % str(e))
    raise RevlogError(_("unknown compression type %r") % marker)
97
97
# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
# struct format for one fixed-width v0 index entry: four signed 32-bit
# big-endian ints followed by three 20-byte nodeids
indexformatv0 = ">4l20s20s20s"
107
107
class revlogoldio(object):
    """Parser/serializer for the original (v0) revlog index format."""

    def __init__(self):
        # byte size of one fixed-width v0 index entry
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        """Parse raw v0 index bytes into (index, nodemap, cache).

        Entries are converted to the revlogv1 tuple shape on the fly.
        v0 has no inline data, so the chunk cache slot is always None.
        """
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        """Serialize one in-memory (v1-shaped) index entry as v0 bytes.

        `node` is a callable mapping a rev to its nodeid (v0 stores
        parents as nodeids, not revs). Raises RevlogError if the entry
        carries flags, which v0 cannot represent.
        """
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
140
140
# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
# offset and flags share the leading 64-bit field; the 20-byte nodeid is
# zero-padded to 32 bytes (12x)
indexformatng = ">Qiiiiii20s12x"
# format of the version header overlaid on the first entry's first 4 bytes
versionformat = ">I"

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff
157
157
class revlogio(object):
    """Parser/serializer for the RevlogNG index format."""

    def __init__(self):
        # byte size of one fixed-width NG index entry
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        """Parse raw NG index data via the C extension.

        Returns (index, nodemap-or-None, chunk cache).
        """
        index, cache = parsers.parse_index2(data, inline)
        nodemap = getattr(index, 'nodemap', None)
        return index, nodemap, cache

    def packentry(self, entry, node, version, rev):
        """Serialize one index entry; rev 0 carries the version header."""
        packed = _pack(indexformatng, *entry)
        if rev == 0:
            # the revlog version is overlaid on the first 4 bytes of the
            # first entry (which are otherwise a zero offset)
            packed = _pack(versionformat, version) + packed[4:]
        return packed
172
172
173 class revlog(object):
173 class revlog(object):
174 """
174 """
175 the underlying revision storage object
175 the underlying revision storage object
176
176
177 A revlog consists of two parts, an index and the revision data.
177 A revlog consists of two parts, an index and the revision data.
178
178
179 The index is a file with a fixed record size containing
179 The index is a file with a fixed record size containing
180 information on each revision, including its nodeid (hash), the
180 information on each revision, including its nodeid (hash), the
181 nodeids of its parents, the position and offset of its data within
181 nodeids of its parents, the position and offset of its data within
182 the data file, and the revision it's based on. Finally, each entry
182 the data file, and the revision it's based on. Finally, each entry
183 contains a linkrev entry that can serve as a pointer to external
183 contains a linkrev entry that can serve as a pointer to external
184 data.
184 data.
185
185
186 The revision data itself is a linear collection of data chunks.
186 The revision data itself is a linear collection of data chunks.
187 Each chunk represents a revision and is usually represented as a
187 Each chunk represents a revision and is usually represented as a
188 delta against the previous chunk. To bound lookup time, runs of
188 delta against the previous chunk. To bound lookup time, runs of
189 deltas are limited to about 2 times the length of the original
189 deltas are limited to about 2 times the length of the original
190 version data. This makes retrieval of a version proportional to
190 version data. This makes retrieval of a version proportional to
191 its size, or O(1) relative to the number of revisions.
191 its size, or O(1) relative to the number of revisions.
192
192
193 Both pieces of the revlog are written to in an append-only
193 Both pieces of the revlog are written to in an append-only
194 fashion, which means we never need to rewrite a file to insert or
194 fashion, which means we never need to rewrite a file to insert or
195 remove data, and can use some simple techniques to avoid the need
195 remove data, and can use some simple techniques to avoid the need
196 for locking while reading.
196 for locking while reading.
197 """
197 """
def __init__(self, opener, indexfile):
    """
    create a revlog object

    opener is a function that abstracts the file opening operation
    and can be used to implement COW semantics or the like.
    """
    self.indexfile = indexfile
    # the data file shares the index's name with a ".d" suffix
    # ("foo.i" -> "foo.d")
    self.datafile = indexfile[:-2] + ".d"
    self.opener = opener
    # 3-tuple of (node, rev, text) for a raw revision.
    self._cache = None
    # 2-tuple of (rev, baserev) defining the base revision the delta chain
    # begins at for a revision.
    self._basecache = None
    # 2-tuple of (offset, data) of raw data from the revlog at an offset.
    self._chunkcache = (0, '')
    # How much data to read and cache into the raw revlog data cache.
    self._chunkcachesize = 65536
    # optional cap on delta chain length (None = unlimited)
    self._maxchainlen = None
    self._aggressivemergedeltas = False
    self.index = []
    # Mapping of partial identifiers to full nodes.
    self._pcache = {}
    # Mapping of revision integer to full node.
    self._nodecache = {nullid: nullrev}
    self._nodepos = None

    # derive the format version/flags from the opener's options, if any
    v = REVLOG_DEFAULT_VERSION
    opts = getattr(opener, 'options', None)
    if opts is not None:
        if 'revlogv1' in opts:
            if 'generaldelta' in opts:
                v |= REVLOGGENERALDELTA
        else:
            # no 'revlogv1' option means the v0 format
            v = 0
        if 'chunkcachesize' in opts:
            self._chunkcachesize = opts['chunkcachesize']
        if 'maxchainlen' in opts:
            self._maxchainlen = opts['maxchainlen']
        if 'aggressivemergedeltas' in opts:
            self._aggressivemergedeltas = opts['aggressivemergedeltas']
        self._lazydeltabase = bool(opts.get('lazydeltabase', False))

    if self._chunkcachesize <= 0:
        raise RevlogError(_('revlog chunk cache size %r is not greater '
                            'than 0') % self._chunkcachesize)
    elif self._chunkcachesize & (self._chunkcachesize - 1):
        # n & (n - 1) is zero exactly when n is a power of two
        raise RevlogError(_('revlog chunk cache size %r is not a power '
                            'of 2') % self._chunkcachesize)

    # read any existing index; a missing file just means an empty revlog
    indexdata = ''
    self._initempty = True
    try:
        f = self.opener(self.indexfile)
        indexdata = f.read()
        f.close()
        if len(indexdata) > 0:
            # an on-disk index overrides the version computed above
            v = struct.unpack(versionformat, indexdata[:4])[0]
            self._initempty = False
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    self.version = v
    self._inline = v & REVLOGNGINLINEDATA
    self._generaldelta = v & REVLOGGENERALDELTA
    # header value splits into format number (low 16 bits) and flags
    flags = v & ~0xFFFF
    fmt = v & 0xFFFF
    if fmt == REVLOGV0 and flags:
        raise RevlogError(_("index %s unknown flags %#04x for format v0")
                          % (self.indexfile, flags >> 16))
    elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
        raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                          % (self.indexfile, flags >> 16))
    elif fmt > REVLOGNG:
        raise RevlogError(_("index %s unknown format %d")
                          % (self.indexfile, fmt))

    self._io = revlogio()
    if self.version == REVLOGV0:
        self._io = revlogoldio()
    try:
        d = self._io.parseindex(indexdata, self._inline)
    except (ValueError, IndexError):
        raise RevlogError(_("index %s is corrupted") % (self.indexfile))
    self.index, nodemap, self._chunkcache = d
    if nodemap is not None:
        self.nodemap = self._nodecache = nodemap
    if not self._chunkcache:
        self._chunkclear()
    # revnum -> (chain-length, sum-delta-length)
    self._chaininfocache = {}
291
291
def tip(self):
    """Return the node of the most recently added revision."""
    # the index carries a trailing null-revision entry, so the tip is
    # the entry at len - 2
    last = len(self.index) - 2
    return self.node(last)
def __contains__(self, rev):
    """True if `rev` is a valid revision number in this revlog."""
    return rev >= 0 and rev < len(self)
def __len__(self):
    """Number of real revisions (the trailing null entry is excluded)."""
    return len(self.index) - 1
def __iter__(self):
    """Iterate over all revision numbers, 0 .. len(self) - 1."""
    return iter(xrange(len(self)))
def revs(self, start=0, stop=None):
    """iterate over all rev in this revlog (from start to stop)"""
    if stop is None:
        # no explicit stop: iterate forward to the end
        return xrange(start, len(self), 1)
    # walk backwards when start is past stop; `stop` is inclusive, so
    # nudge it one step in the direction of travel
    step = -1 if start > stop else 1
    return xrange(start, stop + step, step)
310
310
@util.propertycache
def nodemap(self):
    """Lazily built mapping of node -> rev.

    Calling self.rev() populates self._nodecache as a side effect;
    that cache then serves as the mapping.
    """
    self.rev(self.node(0))
    return self._nodecache
315
315
def hasnode(self, node):
    """Return True if `node` is present in this revlog."""
    try:
        self.rev(node)
    except KeyError:
        # rev() raises (a subclass of) KeyError for unknown nodes
        return False
    return True
322
322
def clearcaches(self):
    """Reset the node-lookup caches to their empty state."""
    try:
        # the C-extension nodemap can clear itself in place
        self._nodecache.clearcaches()
    except AttributeError:
        # pure-python fallback: _nodecache is a plain dict
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
329
329
def rev(self, node):
    """Return the revision number for `node`.

    Raises LookupError if the node is not in this revlog. A TypeError
    from the cache (e.g. a non-string key) is propagated unchanged.
    """
    try:
        return self._nodecache[node]
    except TypeError:
        raise
    except RevlogError:
        # parsers.c radix tree lookup failed
        raise LookupError(node, self.indexfile, _('no node'))
    except KeyError:
        # pure python cache lookup failed
        n = self._nodecache
        i = self.index
        p = self._nodepos
        if p is None:
            # start scanning from the tip (skip the trailing null entry)
            p = len(i) - 2
        for r in xrange(p, -1, -1):
            v = i[r][7]
            # cache every node we pass so later lookups are O(1)
            n[v] = r
            if v == node:
                # remember where to resume the next cache-miss scan
                self._nodepos = r - 1
                return r
        raise LookupError(node, self.indexfile, _('no node'))
352
352
def node(self, rev):
    """Return the nodeid stored in the index for revision `rev`."""
    entry = self.index[rev]
    return entry[7]
def linkrev(self, rev):
    """Return the link revision recorded for `rev`."""
    entry = self.index[rev]
    return entry[4]
def parents(self, node):
    """Return the parent nodes of `node` as a (p1, p2) pair."""
    index = self.index
    entry = index[self.rev(node)]
    # the entry stores parent *revs*; map them back to nodes inline
    p1 = index[entry[5]][7]
    p2 = index[entry[6]][7]
    return p1, p2
def parentrevs(self, rev):
    """Return the two parent revision numbers of `rev` as a tuple."""
    entry = self.index[rev]
    return entry[5:7]
def start(self, rev):
    """Byte offset of `rev`'s chunk within the data file."""
    # the offset shares its packed field with the 16-bit flags
    packed = self.index[rev][0]
    return int(packed >> 16)
def end(self, rev):
    """Byte offset one past the end of `rev`'s chunk."""
    begin = self.start(rev)
    return begin + self.length(rev)
def length(self, rev):
    """Length in bytes of `rev`'s stored (compressed) chunk."""
    entry = self.index[rev]
    return entry[1]
def chainbase(self, rev):
    """Return the base revision of `rev`'s delta chain.

    The base is the entry whose stored base field points at itself.
    """
    index = self.index
    current = rev
    base = index[current][3]
    while base != current:
        current = base
        base = index[current][3]
    return base
def chainlen(self, rev):
    """Number of deltas that must be applied to reconstruct `rev`."""
    chainlength, _deltasize = self._chaininfo(rev)
    return chainlength
378
378
def _chaininfo(self, rev):
    """Return (chain length, total compressed delta size) for `rev`.

    Results are memoized in self._chaininfocache; if the walk reaches a
    cached ancestor, its totals are reused instead of walking on to the
    chain base.
    """
    chaininfocache = self._chaininfocache
    if rev in chaininfocache:
        return chaininfocache[rev]
    index = self.index
    generaldelta = self._generaldelta
    iterrev = rev
    e = index[iterrev]
    clen = 0
    compresseddeltalen = 0
    # walk until the base (an entry whose base field points at itself)
    while iterrev != e[3]:
        clen += 1
        compresseddeltalen += e[1]
        if generaldelta:
            # the delta base is stored explicitly in the entry
            iterrev = e[3]
        else:
            # pre-generaldelta: the base is always the previous rev
            iterrev -= 1
        if iterrev in chaininfocache:
            t = chaininfocache[iterrev]
            clen += t[0]
            compresseddeltalen += t[1]
            break
        e = index[iterrev]
    else:
        # Add text length of base since decompressing that also takes
        # work. For cache hits the length is already included.
        compresseddeltalen += e[1]
    r = (clen, compresseddeltalen)
    chaininfocache[rev] = r
    return r
409
409
def flags(self, rev):
    """Return the 16-bit flag field stored for `rev`."""
    packed = self.index[rev][0]
    return packed & 0xFFFF
def rawsize(self, rev):
    """return the length of the uncompressed text for a given revision"""
    stored = self.index[rev][2]
    if stored >= 0:
        return stored

    # length not recorded in the index (stored as -1, as for entries
    # converted from the v0 format): reconstruct the text and measure it
    text = self.revision(self.node(rev))
    return len(text)
size = rawsize
421
421
def ancestors(self, revs, stoprev=0, inclusive=False):
    """Generate the ancestors of 'revs' in reverse topological order.
    Does not generate revs lower than stoprev.

    See the documentation for ancestor.lazyancestors for more details."""

    # delegate to the shared helper, feeding it this revlog's parent
    # lookup function
    return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                  inclusive=inclusive)
430
430
def descendants(self, revs):
    """Generate the descendants of 'revs' in revision order.

    Yield a sequence of revision numbers starting with a child of
    some rev in revs, i.e., each revision is *not* considered a
    descendant of itself. Results are ordered by revision number (a
    topological sort)."""
    lowest = min(revs)
    if lowest == nullrev:
        # every revision descends from the null revision
        for rev in self:
            yield rev
        return

    seen = set(revs)
    for candidate in self.revs(start=lowest + 1):
        # a revision is a descendant iff one of its parents is one
        if any(p != nullrev and p in seen
               for p in self.parentrevs(candidate)):
            seen.add(candidate)
            yield candidate
451
451
def findcommonmissing(self, common=None, heads=None):
    """Return a tuple of the ancestors of common and the ancestors of heads
    that are not ancestors of common. In revset terminology, we return the
    tuple:

    ::common, (::heads) - (::common)

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    # translate node IDs to revision numbers
    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    # we want the ancestors, but inclusive
    class lazyset(object):
        # set-like view over a lazily consumed iterable plus explicitly
        # added members; avoids materializing all ancestors up front
        def __init__(self, lazyvalues):
            self.addedvalues = set()
            self.lazyvalues = lazyvalues

        def __contains__(self, value):
            return value in self.addedvalues or value in self.lazyvalues

        def __iter__(self):
            added = self.addedvalues
            for r in added:
                yield r
            for r in self.lazyvalues:
                if not r in added:
                    yield r

        def add(self, value):
            self.addedvalues.add(value)

        def update(self, values):
            self.addedvalues.update(values)

    has = lazyset(self.ancestors(common))
    has.add(nullrev)
    has.update(common)

    # take all ancestors from heads that aren't in has
    missing = set()
    visit = collections.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        else:
            missing.add(r)
            for p in self.parentrevs(r):
                if p not in has:
                    visit.append(p)
    missing = list(missing)
    missing.sort()
    return has, [self.node(r) for r in missing]
515
515
def incrementalmissingrevs(self, common=None):
    """Return an object that can be used to incrementally compute the
    revision numbers of the ancestors of arbitrary sets that are not
    ancestors of common. This is an ancestor.incrementalmissingancestors
    object.

    'common' is a list of revision numbers. If common is not supplied, uses
    nullrev.
    """
    base = [nullrev] if common is None else common
    return ancestor.incrementalmissingancestors(self.parentrevs, base)
529
529
def findmissingrevs(self, common=None, heads=None):
    """Return the revision numbers of the ancestors of heads that
    are not ancestors of common.

    More specifically, return a list of revision numbers corresponding to
    nodes N such that every N satisfies the following constraints:

    1. N is an ancestor of some node in 'heads'
    2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of revision numbers. If heads is
    not supplied, uses all of the revlog's heads. If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullrev]
    if heads is None:
        heads = self.headrevs()

    missing = self.incrementalmissingrevs(common=common)
    return missing.missingancestors(heads)
553
553
554 def findmissing(self, common=None, heads=None):
554 def findmissing(self, common=None, heads=None):
555 """Return the ancestors of heads that are not ancestors of common.
555 """Return the ancestors of heads that are not ancestors of common.
556
556
557 More specifically, return a list of nodes N such that every N
557 More specifically, return a list of nodes N such that every N
558 satisfies the following constraints:
558 satisfies the following constraints:
559
559
560 1. N is an ancestor of some node in 'heads'
560 1. N is an ancestor of some node in 'heads'
561 2. N is not an ancestor of any node in 'common'
561 2. N is not an ancestor of any node in 'common'
562
562
563 The list is sorted by revision number, meaning it is
563 The list is sorted by revision number, meaning it is
564 topologically sorted.
564 topologically sorted.
565
565
566 'heads' and 'common' are both lists of node IDs. If heads is
566 'heads' and 'common' are both lists of node IDs. If heads is
567 not supplied, uses all of the revlog's heads. If common is not
567 not supplied, uses all of the revlog's heads. If common is not
568 supplied, uses nullid."""
568 supplied, uses nullid."""
569 if common is None:
569 if common is None:
570 common = [nullid]
570 common = [nullid]
571 if heads is None:
571 if heads is None:
572 heads = self.heads()
572 heads = self.heads()
573
573
574 common = [self.rev(n) for n in common]
574 common = [self.rev(n) for n in common]
575 heads = [self.rev(n) for n in heads]
575 heads = [self.rev(n) for n in heads]
576
576
577 inc = self.incrementalmissingrevs(common=common)
577 inc = self.incrementalmissingrevs(common=common)
578 return [self.node(r) for r in inc.missingancestors(heads)]
578 return [self.node(r) for r in inc.missingancestors(heads)]
579
579
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        # canonical empty result, returned whenever no path can exist
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        # keep only the heads that were actually reached from the roots
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
734
734
735 def headrevs(self):
735 def headrevs(self):
736 try:
736 try:
737 return self.index.headrevs()
737 return self.index.headrevs()
738 except AttributeError:
738 except AttributeError:
739 return self._headrevs()
739 return self._headrevs()
740
740
741 def computephases(self, roots):
741 def computephases(self, roots):
742 return self.index.computephasesmapsets(roots)
742 return self.index.computephasesmapsets(roots)
743
743
744 def _headrevs(self):
744 def _headrevs(self):
745 count = len(self)
745 count = len(self)
746 if not count:
746 if not count:
747 return [nullrev]
747 return [nullrev]
748 # we won't iter over filtered rev so nobody is a head at start
748 # we won't iter over filtered rev so nobody is a head at start
749 ishead = [0] * (count + 1)
749 ishead = [0] * (count + 1)
750 index = self.index
750 index = self.index
751 for r in self:
751 for r in self:
752 ishead[r] = 1 # I may be an head
752 ishead[r] = 1 # I may be an head
753 e = index[r]
753 e = index[r]
754 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
754 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
755 return [r for r, val in enumerate(ishead) if val]
755 return [r for r, val in enumerate(ishead) if val]
756
756
757 def heads(self, start=None, stop=None):
757 def heads(self, start=None, stop=None):
758 """return the list of all nodes that have no children
758 """return the list of all nodes that have no children
759
759
760 if start is specified, only heads that are descendants of
760 if start is specified, only heads that are descendants of
761 start will be returned
761 start will be returned
762 if stop is specified, it will consider all the revs from stop
762 if stop is specified, it will consider all the revs from stop
763 as if they had no children
763 as if they had no children
764 """
764 """
765 if start is None and stop is None:
765 if start is None and stop is None:
766 if not len(self):
766 if not len(self):
767 return [nullid]
767 return [nullid]
768 return [self.node(r) for r in self.headrevs()]
768 return [self.node(r) for r in self.headrevs()]
769
769
770 if start is None:
770 if start is None:
771 start = nullid
771 start = nullid
772 if stop is None:
772 if stop is None:
773 stop = []
773 stop = []
774 stoprevs = set([self.rev(n) for n in stop])
774 stoprevs = set([self.rev(n) for n in stop])
775 startrev = self.rev(start)
775 startrev = self.rev(start)
776 reachable = set((startrev,))
776 reachable = set((startrev,))
777 heads = set((startrev,))
777 heads = set((startrev,))
778
778
779 parentrevs = self.parentrevs
779 parentrevs = self.parentrevs
780 for r in self.revs(start=startrev + 1):
780 for r in self.revs(start=startrev + 1):
781 for p in parentrevs(r):
781 for p in parentrevs(r):
782 if p in reachable:
782 if p in reachable:
783 if r not in stoprevs:
783 if r not in stoprevs:
784 reachable.add(r)
784 reachable.add(r)
785 heads.add(r)
785 heads.add(r)
786 if p in heads and p not in stoprevs:
786 if p in heads and p not in stoprevs:
787 heads.remove(p)
787 heads.remove(p)
788
788
789 return [self.node(r) for r in heads]
789 return [self.node(r) for r in heads]
790
790
791 def children(self, node):
791 def children(self, node):
792 """find the children of a given node"""
792 """find the children of a given node"""
793 c = []
793 c = []
794 p = self.rev(node)
794 p = self.rev(node)
795 for r in self.revs(start=p + 1):
795 for r in self.revs(start=p + 1):
796 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
796 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
797 if prevs:
797 if prevs:
798 for pr in prevs:
798 for pr in prevs:
799 if pr == p:
799 if pr == p:
800 c.append(self.node(r))
800 c.append(self.node(r))
801 elif p == nullrev:
801 elif p == nullrev:
802 c.append(self.node(r))
802 c.append(self.node(r))
803 return c
803 return c
804
804
805 def descendant(self, start, end):
805 def descendant(self, start, end):
806 if start == nullrev:
806 if start == nullrev:
807 return True
807 return True
808 for i in self.descendants([start]):
808 for i in self.descendants([start]):
809 if i == end:
809 if i == end:
810 return True
810 return True
811 elif i > end:
811 elif i > end:
812 break
812 break
813 return False
813 return False
814
814
815 def commonancestorsheads(self, a, b):
815 def commonancestorsheads(self, a, b):
816 """calculate all the heads of the common ancestors of nodes a and b"""
816 """calculate all the heads of the common ancestors of nodes a and b"""
817 a, b = self.rev(a), self.rev(b)
817 a, b = self.rev(a), self.rev(b)
818 try:
818 try:
819 ancs = self.index.commonancestorsheads(a, b)
819 ancs = self.index.commonancestorsheads(a, b)
820 except (AttributeError, OverflowError): # C implementation failed
820 except (AttributeError, OverflowError): # C implementation failed
821 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
821 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
822 return map(self.node, ancs)
822 return map(self.node, ancs)
823
823
824 def isancestor(self, a, b):
824 def isancestor(self, a, b):
825 """return True if node a is an ancestor of node b
825 """return True if node a is an ancestor of node b
826
826
827 The implementation of this is trivial but the use of
827 The implementation of this is trivial but the use of
828 commonancestorsheads is not."""
828 commonancestorsheads is not."""
829 return a in self.commonancestorsheads(a, b)
829 return a in self.commonancestorsheads(a, b)
830
830
831 def ancestor(self, a, b):
831 def ancestor(self, a, b):
832 """calculate the "best" common ancestor of nodes a and b"""
832 """calculate the "best" common ancestor of nodes a and b"""
833
833
834 a, b = self.rev(a), self.rev(b)
834 a, b = self.rev(a), self.rev(b)
835 try:
835 try:
836 ancs = self.index.ancestors(a, b)
836 ancs = self.index.ancestors(a, b)
837 except (AttributeError, OverflowError):
837 except (AttributeError, OverflowError):
838 ancs = ancestor.ancestors(self.parentrevs, a, b)
838 ancs = ancestor.ancestors(self.parentrevs, a, b)
839 if ancs:
839 if ancs:
840 # choose a consistent winner when there's a tie
840 # choose a consistent winner when there's a tie
841 return min(map(self.node, ancs))
841 return min(map(self.node, ancs))
842 return nullid
842 return nullid
843
843
    def _match(self, id):
        """Resolve 'id' to a binary node by exact forms only.

        Accepted forms, tried in order: an integer revision number, a
        20-byte binary node, a string of decimal digits (str(rev),
        negatives count from the end), and a 40-char hex nodeid.
        Falls off the end (returning None) when nothing matches exactly.
        """
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                # reject forms like '01' or '1 ' that int() would accept
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                # not valid hex, or the node is unknown
                pass
877
877
    def _partialmatch(self, id):
        """Resolve a hex-prefix 'id' to the unique matching binary node.

        Returns None when nothing matches; raises LookupError when the
        prefix is ambiguous.  Successful prefix lookups are memoized in
        self._pcache.
        """
        try:
            n = self.index.partialmatch(id)
            if n and self.hasnode(n):
                return n
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fall through to slow path that filters hidden revisions
            pass
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[:l * 2])
                # linear scan of the index for nodes with this binary prefix
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                # recheck against the full (possibly odd-length) hex prefix
                # and drop nodes hidden by filtering
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                # id was not valid hex
                pass
912
912
913 def lookup(self, id):
913 def lookup(self, id):
914 """locate a node based on:
914 """locate a node based on:
915 - revision number or str(revision number)
915 - revision number or str(revision number)
916 - nodeid or subset of hex nodeid
916 - nodeid or subset of hex nodeid
917 """
917 """
918 n = self._match(id)
918 n = self._match(id)
919 if n is not None:
919 if n is not None:
920 return n
920 return n
921 n = self._partialmatch(id)
921 n = self._partialmatch(id)
922 if n:
922 if n:
923 return n
923 return n
924
924
925 raise LookupError(id, self.indexfile, _('no match found'))
925 raise LookupError(id, self.indexfile, _('no match found'))
926
926
927 def cmp(self, node, text):
927 def cmp(self, node, text):
928 """compare text with a given file revision
928 """compare text with a given file revision
929
929
930 returns True if text is different than what is stored.
930 returns True if text is different than what is stored.
931 """
931 """
932 p1, p2 = self.parents(node)
932 p1, p2 = self.parents(node)
933 return hash(text, p1, p2) != node
933 return hash(text, p1, p2) != node
934
934
935 def _addchunk(self, offset, data):
935 def _addchunk(self, offset, data):
936 """Add a segment to the revlog cache.
936 """Add a segment to the revlog cache.
937
937
938 Accepts an absolute offset and the data that is at that location.
938 Accepts an absolute offset and the data that is at that location.
939 """
939 """
940 o, d = self._chunkcache
940 o, d = self._chunkcache
941 # try to add to existing cache
941 # try to add to existing cache
942 if o + len(d) == offset and len(d) + len(data) < _chunksize:
942 if o + len(d) == offset and len(d) + len(data) < _chunksize:
943 self._chunkcache = o, d + data
943 self._chunkcache = o, d + data
944 else:
944 else:
945 self._chunkcache = offset, data
945 self._chunkcache = offset, data
946
946
947 def _loadchunk(self, offset, length, df=None):
947 def _loadchunk(self, offset, length, df=None):
948 """Load a segment of raw data from the revlog.
948 """Load a segment of raw data from the revlog.
949
949
950 Accepts an absolute offset, length to read, and an optional existing
950 Accepts an absolute offset, length to read, and an optional existing
951 file handle to read from.
951 file handle to read from.
952
952
953 If an existing file handle is passed, it will be seeked and the
953 If an existing file handle is passed, it will be seeked and the
954 original seek position will NOT be restored.
954 original seek position will NOT be restored.
955
955
956 Returns a str or buffer of raw byte data.
956 Returns a str or buffer of raw byte data.
957 """
957 """
958 if df is not None:
958 if df is not None:
959 closehandle = False
959 closehandle = False
960 else:
960 else:
961 if self._inline:
961 if self._inline:
962 df = self.opener(self.indexfile)
962 df = self.opener(self.indexfile)
963 else:
963 else:
964 df = self.opener(self.datafile)
964 df = self.opener(self.datafile)
965 closehandle = True
965 closehandle = True
966
966
967 # Cache data both forward and backward around the requested
967 # Cache data both forward and backward around the requested
968 # data, in a fixed size window. This helps speed up operations
968 # data, in a fixed size window. This helps speed up operations
969 # involving reading the revlog backwards.
969 # involving reading the revlog backwards.
970 cachesize = self._chunkcachesize
970 cachesize = self._chunkcachesize
971 realoffset = offset & ~(cachesize - 1)
971 realoffset = offset & ~(cachesize - 1)
972 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
972 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
973 - realoffset)
973 - realoffset)
974 df.seek(realoffset)
974 df.seek(realoffset)
975 d = df.read(reallength)
975 d = df.read(reallength)
976 if closehandle:
976 if closehandle:
977 df.close()
977 df.close()
978 self._addchunk(realoffset, d)
978 self._addchunk(realoffset, d)
979 if offset != realoffset or reallength != length:
979 if offset != realoffset or reallength != length:
980 return util.buffer(d, offset - realoffset, length)
980 return util.buffer(d, offset - realoffset, length)
981 return d
981 return d
982
982
983 def _getchunk(self, offset, length, df=None):
983 def _getchunk(self, offset, length, df=None):
984 """Obtain a segment of raw data from the revlog.
984 """Obtain a segment of raw data from the revlog.
985
985
986 Accepts an absolute offset, length of bytes to obtain, and an
986 Accepts an absolute offset, length of bytes to obtain, and an
987 optional file handle to the already-opened revlog. If the file
987 optional file handle to the already-opened revlog. If the file
988 handle is used, it's original seek position will not be preserved.
988 handle is used, it's original seek position will not be preserved.
989
989
990 Requests for data may be returned from a cache.
990 Requests for data may be returned from a cache.
991
991
992 Returns a str or a buffer instance of raw byte data.
992 Returns a str or a buffer instance of raw byte data.
993 """
993 """
994 o, d = self._chunkcache
994 o, d = self._chunkcache
995 l = len(d)
995 l = len(d)
996
996
997 # is it in the cache?
997 # is it in the cache?
998 cachestart = offset - o
998 cachestart = offset - o
999 cacheend = cachestart + length
999 cacheend = cachestart + length
1000 if cachestart >= 0 and cacheend <= l:
1000 if cachestart >= 0 and cacheend <= l:
1001 if cachestart == 0 and cacheend == l:
1001 if cachestart == 0 and cacheend == l:
1002 return d # avoid a copy
1002 return d # avoid a copy
1003 return util.buffer(d, cachestart, cacheend - cachestart)
1003 return util.buffer(d, cachestart, cacheend - cachestart)
1004
1004
1005 return self._loadchunk(offset, length, df=df)
1005 return self._loadchunk(offset, length, df=df)
1006
1006
1007 def _chunkraw(self, startrev, endrev, df=None):
1007 def _chunkraw(self, startrev, endrev, df=None):
1008 """Obtain a segment of raw data corresponding to a range of revisions.
1008 """Obtain a segment of raw data corresponding to a range of revisions.
1009
1009
1010 Accepts the start and end revisions and an optional already-open
1010 Accepts the start and end revisions and an optional already-open
1011 file handle to be used for reading. If the file handle is read, its
1011 file handle to be used for reading. If the file handle is read, its
1012 seek position will not be preserved.
1012 seek position will not be preserved.
1013
1013
1014 Requests for data may be satisfied by a cache.
1014 Requests for data may be satisfied by a cache.
1015
1015
1016 Returns a str or a buffer instance of raw byte data. Callers will
1016 Returns a str or a buffer instance of raw byte data. Callers will
1017 need to call ``self.start(rev)`` and ``self.length()`` to determine
1017 need to call ``self.start(rev)`` and ``self.length()`` to determine
1018 where each revision's data begins and ends.
1018 where each revision's data begins and ends.
1019 """
1019 """
1020 start = self.start(startrev)
1020 start = self.start(startrev)
1021 end = self.end(endrev)
1021 end = self.end(endrev)
1022 if self._inline:
1022 if self._inline:
1023 start += (startrev + 1) * self._io.size
1023 start += (startrev + 1) * self._io.size
1024 end += (endrev + 1) * self._io.size
1024 end += (endrev + 1) * self._io.size
1025 length = end - start
1025 length = end - start
1026 return self._getchunk(start, length, df=df)
1026 return self._getchunk(start, length, df=df)
1027
1027
1028 def _chunk(self, rev, df=None):
1028 def _chunk(self, rev, df=None):
1029 """Obtain a single decompressed chunk for a revision.
1029 """Obtain a single decompressed chunk for a revision.
1030
1030
1031 Accepts an integer revision and an optional already-open file handle
1031 Accepts an integer revision and an optional already-open file handle
1032 to be used for reading. If used, the seek position of the file will not
1032 to be used for reading. If used, the seek position of the file will not
1033 be preserved.
1033 be preserved.
1034
1034
1035 Returns a str holding uncompressed data for the requested revision.
1035 Returns a str holding uncompressed data for the requested revision.
1036 """
1036 """
1037 return decompress(self._chunkraw(rev, rev, df=df))
1037 return decompress(self._chunkraw(rev, rev, df=df))
1038
1038
1039 def _chunks(self, revs, df=None):
1039 def _chunks(self, revs, df=None):
1040 """Obtain decompressed chunks for the specified revisions.
1040 """Obtain decompressed chunks for the specified revisions.
1041
1041
1042 Accepts an iterable of numeric revisions that are assumed to be in
1042 Accepts an iterable of numeric revisions that are assumed to be in
1043 ascending order. Also accepts an optional already-open file handle
1043 ascending order. Also accepts an optional already-open file handle
1044 to be used for reading. If used, the seek position of the file will
1044 to be used for reading. If used, the seek position of the file will
1045 not be preserved.
1045 not be preserved.
1046
1046
1047 This function is similar to calling ``self._chunk()`` multiple times,
1047 This function is similar to calling ``self._chunk()`` multiple times,
1048 but is faster.
1048 but is faster.
1049
1049
1050 Returns a list with decompressed data for each requested revision.
1050 Returns a list with decompressed data for each requested revision.
1051 """
1051 """
1052 if not revs:
1052 if not revs:
1053 return []
1053 return []
1054 start = self.start
1054 start = self.start
1055 length = self.length
1055 length = self.length
1056 inline = self._inline
1056 inline = self._inline
1057 iosize = self._io.size
1057 iosize = self._io.size
1058 buffer = util.buffer
1058 buffer = util.buffer
1059
1059
1060 l = []
1060 l = []
1061 ladd = l.append
1061 ladd = l.append
1062
1062
1063 # preload the cache
1063 # preload the cache
1064 try:
1064 try:
1065 while True:
1065 while True:
1066 # ensure that the cache doesn't change out from under us
1066 # ensure that the cache doesn't change out from under us
1067 _cache = self._chunkcache
1067 _cache = self._chunkcache
1068 self._chunkraw(revs[0], revs[-1], df=df)
1068 self._chunkraw(revs[0], revs[-1], df=df)
1069 if _cache == self._chunkcache:
1069 if _cache == self._chunkcache:
1070 break
1070 break
1071 offset, data = _cache
1071 offset, data = _cache
1072 except OverflowError:
1072 except OverflowError:
1073 # issue4215 - we can't cache a run of chunks greater than
1073 # issue4215 - we can't cache a run of chunks greater than
1074 # 2G on Windows
1074 # 2G on Windows
1075 return [self._chunk(rev, df=df) for rev in revs]
1075 return [self._chunk(rev, df=df) for rev in revs]
1076
1076
1077 for rev in revs:
1077 for rev in revs:
1078 chunkstart = start(rev)
1078 chunkstart = start(rev)
1079 if inline:
1079 if inline:
1080 chunkstart += (rev + 1) * iosize
1080 chunkstart += (rev + 1) * iosize
1081 chunklength = length(rev)
1081 chunklength = length(rev)
1082 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
1082 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
1083
1083
1084 return l
1084 return l
1085
1085
1086 def _chunkclear(self):
1086 def _chunkclear(self):
1087 """Clear the raw chunk cache."""
1087 """Clear the raw chunk cache."""
1088 self._chunkcache = (0, '')
1088 self._chunkcache = (0, '')
1089
1089
1090 def deltaparent(self, rev):
1090 def deltaparent(self, rev):
1091 """return deltaparent of the given revision"""
1091 """return deltaparent of the given revision"""
1092 base = self.index[rev][3]
1092 base = self.index[rev][3]
1093 if base == rev:
1093 if base == rev:
1094 return nullrev
1094 return nullrev
1095 elif self._generaldelta:
1095 elif self._generaldelta:
1096 return base
1096 return base
1097 else:
1097 else:
1098 return rev - 1
1098 return rev - 1
1099
1099
1100 def revdiff(self, rev1, rev2):
1100 def revdiff(self, rev1, rev2):
1101 """return or calculate a delta between two revisions"""
1101 """return or calculate a delta between two revisions"""
1102 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1102 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1103 return str(self._chunk(rev2))
1103 return str(self._chunk(rev2))
1104
1104
1105 return mdiff.textdiff(self.revision(rev1),
1105 return mdiff.textdiff(self.revision(rev1),
1106 self.revision(rev2))
1106 self.revision(rev2))
1107
1107
1108 def revision(self, nodeorrev, _df=None):
1108 def revision(self, nodeorrev, _df=None):
1109 """return an uncompressed revision of a given node or revision
1109 """return an uncompressed revision of a given node or revision
1110 number.
1110 number.
1111
1111
1112 _df is an existing file handle to read from. It is meant to only be
1112 _df is an existing file handle to read from. It is meant to only be
1113 used internally.
1113 used internally.
1114 """
1114 """
1115 if isinstance(nodeorrev, int):
1115 if isinstance(nodeorrev, int):
1116 rev = nodeorrev
1116 rev = nodeorrev
1117 node = self.node(rev)
1117 node = self.node(rev)
1118 else:
1118 else:
1119 node = nodeorrev
1119 node = nodeorrev
1120 rev = None
1120 rev = None
1121
1121
1122 cachedrev = None
1122 cachedrev = None
1123 if node == nullid:
1123 if node == nullid:
1124 return ""
1124 return ""
1125 if self._cache:
1125 if self._cache:
1126 if self._cache[0] == node:
1126 if self._cache[0] == node:
1127 return self._cache[2]
1127 return self._cache[2]
1128 cachedrev = self._cache[1]
1128 cachedrev = self._cache[1]
1129
1129
1130 # look up what we need to read
1130 # look up what we need to read
1131 text = None
1131 text = None
1132 if rev is None:
1132 if rev is None:
1133 rev = self.rev(node)
1133 rev = self.rev(node)
1134
1134
1135 # check rev flags
1135 # check rev flags
1136 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1136 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1137 raise RevlogError(_('incompatible revision flag %x') %
1137 raise RevlogError(_('incompatible revision flag %x') %
1138 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1138 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1139
1139
1140 # build delta chain
1140 # build delta chain
1141 chain = []
1141 chain = []
1142 index = self.index # for performance
1142 index = self.index # for performance
1143 generaldelta = self._generaldelta
1143 generaldelta = self._generaldelta
1144 iterrev = rev
1144 iterrev = rev
1145 e = index[iterrev]
1145 e = index[iterrev]
1146 while iterrev != e[3] and iterrev != cachedrev:
1146 while iterrev != e[3] and iterrev != cachedrev:
1147 chain.append(iterrev)
1147 chain.append(iterrev)
1148 if generaldelta:
1148 if generaldelta:
1149 iterrev = e[3]
1149 iterrev = e[3]
1150 else:
1150 else:
1151 iterrev -= 1
1151 iterrev -= 1
1152 e = index[iterrev]
1152 e = index[iterrev]
1153
1153
1154 if iterrev == cachedrev:
1154 if iterrev == cachedrev:
1155 # cache hit
1155 # cache hit
1156 text = self._cache[2]
1156 text = self._cache[2]
1157 else:
1157 else:
1158 chain.append(iterrev)
1158 chain.append(iterrev)
1159 chain.reverse()
1159 chain.reverse()
1160
1160
1161 # drop cache to save memory
1161 # drop cache to save memory
1162 self._cache = None
1162 self._cache = None
1163
1163
1164 bins = self._chunks(chain, df=_df)
1164 bins = self._chunks(chain, df=_df)
1165 if text is None:
1165 if text is None:
1166 text = str(bins[0])
1166 text = str(bins[0])
1167 bins = bins[1:]
1167 bins = bins[1:]
1168
1168
1169 text = mdiff.patches(text, bins)
1169 text = mdiff.patches(text, bins)
1170
1170
1171 text = self._checkhash(text, node, rev)
1171 text = self._checkhash(text, node, rev)
1172
1172
1173 self._cache = (node, rev, text)
1173 self._cache = (node, rev, text)
1174 return text
1174 return text
1175
1175
1176 def hash(self, text, p1, p2):
1176 def hash(self, text, p1, p2):
1177 """Compute a node hash.
1177 """Compute a node hash.
1178
1178
1179 Available as a function so that subclasses can replace the hash
1179 Available as a function so that subclasses can replace the hash
1180 as needed.
1180 as needed.
1181 """
1181 """
1182 return hash(text, p1, p2)
1182 return hash(text, p1, p2)
1183
1183
1184 def _checkhash(self, text, node, rev):
1184 def _checkhash(self, text, node, rev):
1185 p1, p2 = self.parents(node)
1185 p1, p2 = self.parents(node)
1186 self.checkhash(text, p1, p2, node, rev)
1186 self.checkhash(text, p1, p2, node, rev)
1187 return text
1187 return text
1188
1188
1189 def checkhash(self, text, p1, p2, node, rev=None):
1189 def checkhash(self, text, p1, p2, node, rev=None):
1190 if node != self.hash(text, p1, p2):
1190 if node != self.hash(text, p1, p2):
1191 revornode = rev
1191 revornode = rev
1192 if revornode is None:
1192 if revornode is None:
1193 revornode = templatefilters.short(hex(node))
1193 revornode = templatefilters.short(hex(node))
1194 raise RevlogError(_("integrity check failed on %s:%s")
1194 raise RevlogError(_("integrity check failed on %s:%s")
1195 % (self.indexfile, revornode))
1195 % (self.indexfile, revornode))
1196
1196
1197 def checkinlinesize(self, tr, fp=None):
1197 def checkinlinesize(self, tr, fp=None):
1198 """Check if the revlog is too big for inline and convert if so.
1198 """Check if the revlog is too big for inline and convert if so.
1199
1199
1200 This should be called after revisions are added to the revlog. If the
1200 This should be called after revisions are added to the revlog. If the
1201 revlog has grown too large to be an inline revlog, it will convert it
1201 revlog has grown too large to be an inline revlog, it will convert it
1202 to use multiple index and data files.
1202 to use multiple index and data files.
1203 """
1203 """
1204 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1204 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1205 return
1205 return
1206
1206
1207 trinfo = tr.find(self.indexfile)
1207 trinfo = tr.find(self.indexfile)
1208 if trinfo is None:
1208 if trinfo is None:
1209 raise RevlogError(_("%s not found in the transaction")
1209 raise RevlogError(_("%s not found in the transaction")
1210 % self.indexfile)
1210 % self.indexfile)
1211
1211
1212 trindex = trinfo[2]
1212 trindex = trinfo[2]
1213 if trindex is not None:
1213 if trindex is not None:
1214 dataoff = self.start(trindex)
1214 dataoff = self.start(trindex)
1215 else:
1215 else:
1216 # revlog was stripped at start of transaction, use all leftover data
1216 # revlog was stripped at start of transaction, use all leftover data
1217 trindex = len(self) - 1
1217 trindex = len(self) - 1
1218 dataoff = self.end(-2)
1218 dataoff = self.end(-2)
1219
1219
1220 tr.add(self.datafile, dataoff)
1220 tr.add(self.datafile, dataoff)
1221
1221
1222 if fp:
1222 if fp:
1223 fp.flush()
1223 fp.flush()
1224 fp.close()
1224 fp.close()
1225
1225
1226 df = self.opener(self.datafile, 'w')
1226 df = self.opener(self.datafile, 'w')
1227 try:
1227 try:
1228 for r in self:
1228 for r in self:
1229 df.write(self._chunkraw(r, r))
1229 df.write(self._chunkraw(r, r))
1230 finally:
1230 finally:
1231 df.close()
1231 df.close()
1232
1232
1233 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1233 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1234 self.version &= ~(REVLOGNGINLINEDATA)
1234 self.version &= ~(REVLOGNGINLINEDATA)
1235 self._inline = False
1235 self._inline = False
1236 for i in self:
1236 for i in self:
1237 e = self._io.packentry(self.index[i], self.node, self.version, i)
1237 e = self._io.packentry(self.index[i], self.node, self.version, i)
1238 fp.write(e)
1238 fp.write(e)
1239
1239
1240 # if we don't call close, the temp file will never replace the
1240 # if we don't call close, the temp file will never replace the
1241 # real index
1241 # real index
1242 fp.close()
1242 fp.close()
1243
1243
1244 tr.replace(self.indexfile, trindex * self._io.size)
1244 tr.replace(self.indexfile, trindex * self._io.size)
1245 self._chunkclear()
1245 self._chunkclear()
1246
1246
1247 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1247 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1248 node=None):
1248 node=None):
1249 """add a revision to the log
1249 """add a revision to the log
1250
1250
1251 text - the revision data to add
1251 text - the revision data to add
1252 transaction - the transaction object used for rollback
1252 transaction - the transaction object used for rollback
1253 link - the linkrev data to add
1253 link - the linkrev data to add
1254 p1, p2 - the parent nodeids of the revision
1254 p1, p2 - the parent nodeids of the revision
1255 cachedelta - an optional precomputed delta
1255 cachedelta - an optional precomputed delta
1256 node - nodeid of revision; typically node is not specified, and it is
1256 node - nodeid of revision; typically node is not specified, and it is
1257 computed by default as hash(text, p1, p2), however subclasses might
1257 computed by default as hash(text, p1, p2), however subclasses might
1258 use different hashing method (and override checkhash() in such case)
1258 use different hashing method (and override checkhash() in such case)
1259 """
1259 """
1260 if link == nullrev:
1260 if link == nullrev:
1261 raise RevlogError(_("attempted to add linkrev -1 to %s")
1261 raise RevlogError(_("attempted to add linkrev -1 to %s")
1262 % self.indexfile)
1262 % self.indexfile)
1263
1263
1264 if len(text) > _maxentrysize:
1264 if len(text) > _maxentrysize:
1265 raise RevlogError(
1265 raise RevlogError(
1266 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1266 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1267 % (self.indexfile, len(text)))
1267 % (self.indexfile, len(text)))
1268
1268
1269 node = node or self.hash(text, p1, p2)
1269 node = node or self.hash(text, p1, p2)
1270 if node in self.nodemap:
1270 if node in self.nodemap:
1271 return node
1271 return node
1272
1272
1273 dfh = None
1273 dfh = None
1274 if not self._inline:
1274 if not self._inline:
1275 dfh = self.opener(self.datafile, "a+")
1275 dfh = self.opener(self.datafile, "a+")
1276 ifh = self.opener(self.indexfile, "a+")
1276 ifh = self.opener(self.indexfile, "a+")
1277 try:
1277 try:
1278 return self._addrevision(node, text, transaction, link, p1, p2,
1278 return self._addrevision(node, text, transaction, link, p1, p2,
1279 REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
1279 REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
1280 finally:
1280 finally:
1281 if dfh:
1281 if dfh:
1282 dfh.close()
1282 dfh.close()
1283 ifh.close()
1283 ifh.close()
1284
1284
1285 def compress(self, text):
1285 def compress(self, text):
1286 """ generate a possibly-compressed representation of text """
1286 """ generate a possibly-compressed representation of text """
1287 if not text:
1287 if not text:
1288 return ("", text)
1288 return ("", text)
1289 l = len(text)
1289 l = len(text)
1290 bin = None
1290 bin = None
1291 if l < 44:
1291 if l < 44:
1292 pass
1292 pass
1293 elif l > 1000000:
1293 elif l > 1000000:
1294 # zlib makes an internal copy, thus doubling memory usage for
1294 # zlib makes an internal copy, thus doubling memory usage for
1295 # large files, so lets do this in pieces
1295 # large files, so lets do this in pieces
1296 z = zlib.compressobj()
1296 z = zlib.compressobj()
1297 p = []
1297 p = []
1298 pos = 0
1298 pos = 0
1299 while pos < l:
1299 while pos < l:
1300 pos2 = pos + 2**20
1300 pos2 = pos + 2**20
1301 p.append(z.compress(text[pos:pos2]))
1301 p.append(z.compress(text[pos:pos2]))
1302 pos = pos2
1302 pos = pos2
1303 p.append(z.flush())
1303 p.append(z.flush())
1304 if sum(map(len, p)) < l:
1304 if sum(map(len, p)) < l:
1305 bin = "".join(p)
1305 bin = "".join(p)
1306 else:
1306 else:
1307 bin = _compress(text)
1307 bin = _compress(text)
1308 if bin is None or len(bin) > l:
1308 if bin is None or len(bin) > l:
1309 if text[0] == '\0':
1309 if text[0] == '\0':
1310 return ("", text)
1310 return ("", text)
1311 return ('u', text)
1311 return ('u', text)
1312 return ("", bin)
1312 return ("", bin)
1313
1313
1314 def _isgooddelta(self, d, textlen):
1314 def _isgooddelta(self, d, textlen):
1315 """Returns True if the given delta is good. Good means that it is within
1315 """Returns True if the given delta is good. Good means that it is within
1316 the disk span, disk size, and chain length bounds that we know to be
1316 the disk span, disk size, and chain length bounds that we know to be
1317 performant."""
1317 performant."""
1318 if d is None:
1318 if d is None:
1319 return False
1319 return False
1320
1320
1321 # - 'dist' is the distance from the base revision -- bounding it limits
1321 # - 'dist' is the distance from the base revision -- bounding it limits
1322 # the amount of I/O we need to do.
1322 # the amount of I/O we need to do.
1323 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1323 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1324 # to apply -- bounding it limits the amount of CPU we consume.
1324 # to apply -- bounding it limits the amount of CPU we consume.
1325 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1325 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1326 if (dist > textlen * 4 or l > textlen or
1326 if (dist > textlen * 4 or l > textlen or
1327 compresseddeltalen > textlen * 2 or
1327 compresseddeltalen > textlen * 2 or
1328 (self._maxchainlen and chainlen > self._maxchainlen)):
1328 (self._maxchainlen and chainlen > self._maxchainlen)):
1329 return False
1329 return False
1330
1330
1331 return True
1331 return True
1332
1332
1333 def _addrevision(self, node, text, transaction, link, p1, p2, flags,
1333 def _addrevision(self, node, text, transaction, link, p1, p2, flags,
1334 cachedelta, ifh, dfh, alwayscache=False):
1334 cachedelta, ifh, dfh, alwayscache=False):
1335 """internal function to add revisions to the log
1335 """internal function to add revisions to the log
1336
1336
1337 see addrevision for argument descriptions.
1337 see addrevision for argument descriptions.
1338 invariants:
1338 invariants:
1339 - text is optional (can be None); if not set, cachedelta must be set.
1339 - text is optional (can be None); if not set, cachedelta must be set.
1340 if both are set, they must correspond to each other.
1340 if both are set, they must correspond to each other.
1341 """
1341 """
1342 btext = [text]
1342 btext = [text]
1343 def buildtext():
1343 def buildtext():
1344 if btext[0] is not None:
1344 if btext[0] is not None:
1345 return btext[0]
1345 return btext[0]
1346 baserev = cachedelta[0]
1346 baserev = cachedelta[0]
1347 delta = cachedelta[1]
1347 delta = cachedelta[1]
1348 # special case deltas which replace entire base; no need to decode
1348 # special case deltas which replace entire base; no need to decode
1349 # base revision. this neatly avoids censored bases, which throw when
1349 # base revision. this neatly avoids censored bases, which throw when
1350 # they're decoded.
1350 # they're decoded.
1351 hlen = struct.calcsize(">lll")
1351 hlen = struct.calcsize(">lll")
1352 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1352 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1353 len(delta) - hlen):
1353 len(delta) - hlen):
1354 btext[0] = delta[hlen:]
1354 btext[0] = delta[hlen:]
1355 else:
1355 else:
1356 if self._inline:
1356 if self._inline:
1357 fh = ifh
1357 fh = ifh
1358 else:
1358 else:
1359 fh = dfh
1359 fh = dfh
1360 basetext = self.revision(self.node(baserev), _df=fh)
1360 basetext = self.revision(self.node(baserev), _df=fh)
1361 btext[0] = mdiff.patch(basetext, delta)
1361 btext[0] = mdiff.patch(basetext, delta)
1362 try:
1362 try:
1363 self.checkhash(btext[0], p1, p2, node)
1363 self.checkhash(btext[0], p1, p2, node)
1364 if flags & REVIDX_ISCENSORED:
1364 if flags & REVIDX_ISCENSORED:
1365 raise RevlogError(_('node %s is not censored') % node)
1365 raise RevlogError(_('node %s is not censored') % node)
1366 except CensoredNodeError:
1366 except CensoredNodeError:
1367 # must pass the censored index flag to add censored revisions
1367 # must pass the censored index flag to add censored revisions
1368 if not flags & REVIDX_ISCENSORED:
1368 if not flags & REVIDX_ISCENSORED:
1369 raise
1369 raise
1370 return btext[0]
1370 return btext[0]
1371
1371
1372 def builddelta(rev):
1372 def builddelta(rev):
1373 # can we use the cached delta?
1373 # can we use the cached delta?
1374 if cachedelta and cachedelta[0] == rev:
1374 if cachedelta and cachedelta[0] == rev:
1375 delta = cachedelta[1]
1375 delta = cachedelta[1]
1376 else:
1376 else:
1377 t = buildtext()
1377 t = buildtext()
1378 if self.iscensored(rev):
1378 if self.iscensored(rev):
1379 # deltas based on a censored revision must replace the
1379 # deltas based on a censored revision must replace the
1380 # full content in one patch, so delta works everywhere
1380 # full content in one patch, so delta works everywhere
1381 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1381 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1382 delta = header + t
1382 delta = header + t
1383 else:
1383 else:
1384 if self._inline:
1384 if self._inline:
1385 fh = ifh
1385 fh = ifh
1386 else:
1386 else:
1387 fh = dfh
1387 fh = dfh
1388 ptext = self.revision(self.node(rev), _df=fh)
1388 ptext = self.revision(self.node(rev), _df=fh)
1389 delta = mdiff.textdiff(ptext, t)
1389 delta = mdiff.textdiff(ptext, t)
1390 data = self.compress(delta)
1390 data = self.compress(delta)
1391 l = len(data[1]) + len(data[0])
1391 l = len(data[1]) + len(data[0])
1392 if basecache[0] == rev:
1392 if basecache[0] == rev:
1393 chainbase = basecache[1]
1393 chainbase = basecache[1]
1394 else:
1394 else:
1395 chainbase = self.chainbase(rev)
1395 chainbase = self.chainbase(rev)
1396 dist = l + offset - self.start(chainbase)
1396 dist = l + offset - self.start(chainbase)
1397 if self._generaldelta:
1397 if self._generaldelta:
1398 base = rev
1398 base = rev
1399 else:
1399 else:
1400 base = chainbase
1400 base = chainbase
1401 chainlen, compresseddeltalen = self._chaininfo(rev)
1401 chainlen, compresseddeltalen = self._chaininfo(rev)
1402 chainlen += 1
1402 chainlen += 1
1403 compresseddeltalen += l
1403 compresseddeltalen += l
1404 return dist, l, data, base, chainbase, chainlen, compresseddeltalen
1404 return dist, l, data, base, chainbase, chainlen, compresseddeltalen
1405
1405
1406 curr = len(self)
1406 curr = len(self)
1407 prev = curr - 1
1407 prev = curr - 1
1408 base = chainbase = curr
1408 base = chainbase = curr
1409 chainlen = None
1409 chainlen = None
1410 offset = self.end(prev)
1410 offset = self.end(prev)
1411 delta = None
1411 delta = None
1412 if self._basecache is None:
1412 if self._basecache is None:
1413 self._basecache = (prev, self.chainbase(prev))
1413 self._basecache = (prev, self.chainbase(prev))
1414 basecache = self._basecache
1414 basecache = self._basecache
1415 p1r, p2r = self.rev(p1), self.rev(p2)
1415 p1r, p2r = self.rev(p1), self.rev(p2)
1416
1416
1417 # full versions are inserted when the needed deltas
1417 # full versions are inserted when the needed deltas
1418 # become comparable to the uncompressed text
1418 # become comparable to the uncompressed text
1419 if text is None:
1419 if text is None:
1420 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1420 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1421 cachedelta[1])
1421 cachedelta[1])
1422 else:
1422 else:
1423 textlen = len(text)
1423 textlen = len(text)
1424
1424
1425 # should we try to build a delta?
1425 # should we try to build a delta?
1426 if prev != nullrev:
1426 if prev != nullrev:
1427 tested = set()
1427 if cachedelta and self._generaldelta and self._lazydeltabase:
1428 if cachedelta and self._generaldelta and self._lazydeltabase:
1428 # Assume what we received from the server is a good choice
1429 # Assume what we received from the server is a good choice
1429 # build delta will reuse the cache
1430 # build delta will reuse the cache
1430 candidatedelta = builddelta(cachedelta[0])
1431 candidatedelta = builddelta(cachedelta[0])
1432 tested.add(candidatedelta[3])
1431 if self._isgooddelta(candidatedelta, textlen):
1433 if self._isgooddelta(candidatedelta, textlen):
1432 delta = candidatedelta
1434 delta = candidatedelta
1433 elif prev != candidatedelta[3]:
1435 if delta is None and self._generaldelta:
1434 # Try against prev to hopefully save us a fulltext.
1435 delta = builddelta(prev)
1436 elif self._generaldelta:
1437 parents = [p1r, p2r]
1436 parents = [p1r, p2r]
1438 if not self._aggressivemergedeltas:
1437 # exclude already lazy tested base if any
1438 parents = [p for p in parents if p not in tested]
1439 if parents and not self._aggressivemergedeltas:
1439 # Pick whichever parent is closer to us (to minimize the
1440 # Pick whichever parent is closer to us (to minimize the
1440 # chance of having to build a fulltext). Since
1441 # chance of having to build a fulltext).
1441 # nullrev == -1, any non-merge commit will always pick p1r.
1442 parents = [max(parents)]
1442 parents = [max(parents)]
1443 tested.update(parents)
1443 pdeltas = []
1444 pdeltas = []
1444 for p in parents:
1445 for p in parents:
1445 pd = builddelta(p)
1446 pd = builddelta(p)
1446 if self._isgooddelta(pd, textlen):
1447 if self._isgooddelta(pd, textlen):
1447 pdeltas.append(pd)
1448 pdeltas.append(pd)
1448 if pdeltas:
1449 if pdeltas:
1449 delta = min(pdeltas, key=lambda x: x[1])
1450 delta = min(pdeltas, key=lambda x: x[1])
1450 elif prev not in parents:
1451 if delta is None and prev not in tested:
1451 # Neither is good, try against prev to hopefully save us
1452 # other approach failed try against prev to hopefully save us a
1452 # a fulltext.
1453 # fulltext.
1453 delta = builddelta(prev)
1454 else:
1455 delta = builddelta(prev)
1454 delta = builddelta(prev)
1456 if delta is not None:
1455 if delta is not None:
1457 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1456 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1458
1457
1459 if not self._isgooddelta(delta, textlen):
1458 if not self._isgooddelta(delta, textlen):
1460 text = buildtext()
1459 text = buildtext()
1461 data = self.compress(text)
1460 data = self.compress(text)
1462 l = len(data[1]) + len(data[0])
1461 l = len(data[1]) + len(data[0])
1463 base = chainbase = curr
1462 base = chainbase = curr
1464
1463
1465 e = (offset_type(offset, flags), l, textlen,
1464 e = (offset_type(offset, flags), l, textlen,
1466 base, link, p1r, p2r, node)
1465 base, link, p1r, p2r, node)
1467 self.index.insert(-1, e)
1466 self.index.insert(-1, e)
1468 self.nodemap[node] = curr
1467 self.nodemap[node] = curr
1469
1468
1470 entry = self._io.packentry(e, self.node, self.version, curr)
1469 entry = self._io.packentry(e, self.node, self.version, curr)
1471 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1470 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1472
1471
1473 if alwayscache and text is None:
1472 if alwayscache and text is None:
1474 text = buildtext()
1473 text = buildtext()
1475
1474
1476 if type(text) == str: # only accept immutable objects
1475 if type(text) == str: # only accept immutable objects
1477 self._cache = (node, curr, text)
1476 self._cache = (node, curr, text)
1478 self._basecache = (curr, chainbase)
1477 self._basecache = (curr, chainbase)
1479 return node
1478 return node
1480
1479
1481 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1480 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1482 curr = len(self) - 1
1481 curr = len(self) - 1
1483 if not self._inline:
1482 if not self._inline:
1484 transaction.add(self.datafile, offset)
1483 transaction.add(self.datafile, offset)
1485 transaction.add(self.indexfile, curr * len(entry))
1484 transaction.add(self.indexfile, curr * len(entry))
1486 if data[0]:
1485 if data[0]:
1487 dfh.write(data[0])
1486 dfh.write(data[0])
1488 dfh.write(data[1])
1487 dfh.write(data[1])
1489 ifh.write(entry)
1488 ifh.write(entry)
1490 else:
1489 else:
1491 offset += curr * self._io.size
1490 offset += curr * self._io.size
1492 transaction.add(self.indexfile, offset, curr)
1491 transaction.add(self.indexfile, offset, curr)
1493 ifh.write(entry)
1492 ifh.write(entry)
1494 ifh.write(data[0])
1493 ifh.write(data[0])
1495 ifh.write(data[1])
1494 ifh.write(data[1])
1496 self.checkinlinesize(transaction, ifh)
1495 self.checkinlinesize(transaction, ifh)
1497
1496
1498 def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
1497 def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
1499 """
1498 """
1500 add a delta group
1499 add a delta group
1501
1500
1502 given a set of deltas, add them to the revision log. the
1501 given a set of deltas, add them to the revision log. the
1503 first delta is against its parent, which should be in our
1502 first delta is against its parent, which should be in our
1504 log, the rest are against the previous delta.
1503 log, the rest are against the previous delta.
1505
1504
1506 If ``addrevisioncb`` is defined, it will be called with arguments of
1505 If ``addrevisioncb`` is defined, it will be called with arguments of
1507 this revlog and the node that was added.
1506 this revlog and the node that was added.
1508 """
1507 """
1509
1508
1510 # track the base of the current delta log
1509 # track the base of the current delta log
1511 content = []
1510 content = []
1512 node = None
1511 node = None
1513
1512
1514 r = len(self)
1513 r = len(self)
1515 end = 0
1514 end = 0
1516 if r:
1515 if r:
1517 end = self.end(r - 1)
1516 end = self.end(r - 1)
1518 ifh = self.opener(self.indexfile, "a+")
1517 ifh = self.opener(self.indexfile, "a+")
1519 isize = r * self._io.size
1518 isize = r * self._io.size
1520 if self._inline:
1519 if self._inline:
1521 transaction.add(self.indexfile, end + isize, r)
1520 transaction.add(self.indexfile, end + isize, r)
1522 dfh = None
1521 dfh = None
1523 else:
1522 else:
1524 transaction.add(self.indexfile, isize, r)
1523 transaction.add(self.indexfile, isize, r)
1525 transaction.add(self.datafile, end)
1524 transaction.add(self.datafile, end)
1526 dfh = self.opener(self.datafile, "a+")
1525 dfh = self.opener(self.datafile, "a+")
1527 def flush():
1526 def flush():
1528 if dfh:
1527 if dfh:
1529 dfh.flush()
1528 dfh.flush()
1530 ifh.flush()
1529 ifh.flush()
1531 try:
1530 try:
1532 # loop through our set of deltas
1531 # loop through our set of deltas
1533 chain = None
1532 chain = None
1534 while True:
1533 while True:
1535 chunkdata = cg.deltachunk(chain)
1534 chunkdata = cg.deltachunk(chain)
1536 if not chunkdata:
1535 if not chunkdata:
1537 break
1536 break
1538 node = chunkdata['node']
1537 node = chunkdata['node']
1539 p1 = chunkdata['p1']
1538 p1 = chunkdata['p1']
1540 p2 = chunkdata['p2']
1539 p2 = chunkdata['p2']
1541 cs = chunkdata['cs']
1540 cs = chunkdata['cs']
1542 deltabase = chunkdata['deltabase']
1541 deltabase = chunkdata['deltabase']
1543 delta = chunkdata['delta']
1542 delta = chunkdata['delta']
1544
1543
1545 content.append(node)
1544 content.append(node)
1546
1545
1547 link = linkmapper(cs)
1546 link = linkmapper(cs)
1548 if node in self.nodemap:
1547 if node in self.nodemap:
1549 # this can happen if two branches make the same change
1548 # this can happen if two branches make the same change
1550 chain = node
1549 chain = node
1551 continue
1550 continue
1552
1551
1553 for p in (p1, p2):
1552 for p in (p1, p2):
1554 if p not in self.nodemap:
1553 if p not in self.nodemap:
1555 raise LookupError(p, self.indexfile,
1554 raise LookupError(p, self.indexfile,
1556 _('unknown parent'))
1555 _('unknown parent'))
1557
1556
1558 if deltabase not in self.nodemap:
1557 if deltabase not in self.nodemap:
1559 raise LookupError(deltabase, self.indexfile,
1558 raise LookupError(deltabase, self.indexfile,
1560 _('unknown delta base'))
1559 _('unknown delta base'))
1561
1560
1562 baserev = self.rev(deltabase)
1561 baserev = self.rev(deltabase)
1563
1562
1564 if baserev != nullrev and self.iscensored(baserev):
1563 if baserev != nullrev and self.iscensored(baserev):
1565 # if base is censored, delta must be full replacement in a
1564 # if base is censored, delta must be full replacement in a
1566 # single patch operation
1565 # single patch operation
1567 hlen = struct.calcsize(">lll")
1566 hlen = struct.calcsize(">lll")
1568 oldlen = self.rawsize(baserev)
1567 oldlen = self.rawsize(baserev)
1569 newlen = len(delta) - hlen
1568 newlen = len(delta) - hlen
1570 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1569 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1571 raise error.CensoredBaseError(self.indexfile,
1570 raise error.CensoredBaseError(self.indexfile,
1572 self.node(baserev))
1571 self.node(baserev))
1573
1572
1574 flags = REVIDX_DEFAULT_FLAGS
1573 flags = REVIDX_DEFAULT_FLAGS
1575 if self._peek_iscensored(baserev, delta, flush):
1574 if self._peek_iscensored(baserev, delta, flush):
1576 flags |= REVIDX_ISCENSORED
1575 flags |= REVIDX_ISCENSORED
1577
1576
1578 # We assume consumers of addrevisioncb will want to retrieve
1577 # We assume consumers of addrevisioncb will want to retrieve
1579 # the added revision, which will require a call to
1578 # the added revision, which will require a call to
1580 # revision(). revision() will fast path if there is a cache
1579 # revision(). revision() will fast path if there is a cache
1581 # hit. So, we tell _addrevision() to always cache in this case.
1580 # hit. So, we tell _addrevision() to always cache in this case.
1582 chain = self._addrevision(node, None, transaction, link,
1581 chain = self._addrevision(node, None, transaction, link,
1583 p1, p2, flags, (baserev, delta),
1582 p1, p2, flags, (baserev, delta),
1584 ifh, dfh,
1583 ifh, dfh,
1585 alwayscache=bool(addrevisioncb))
1584 alwayscache=bool(addrevisioncb))
1586
1585
1587 if addrevisioncb:
1586 if addrevisioncb:
1588 addrevisioncb(self, chain)
1587 addrevisioncb(self, chain)
1589
1588
1590 if not dfh and not self._inline:
1589 if not dfh and not self._inline:
1591 # addrevision switched from inline to conventional
1590 # addrevision switched from inline to conventional
1592 # reopen the index
1591 # reopen the index
1593 ifh.close()
1592 ifh.close()
1594 dfh = self.opener(self.datafile, "a+")
1593 dfh = self.opener(self.datafile, "a+")
1595 ifh = self.opener(self.indexfile, "a+")
1594 ifh = self.opener(self.indexfile, "a+")
1596 finally:
1595 finally:
1597 if dfh:
1596 if dfh:
1598 dfh.close()
1597 dfh.close()
1599 ifh.close()
1598 ifh.close()
1600
1599
1601 return content
1600 return content
1602
1601
1603 def iscensored(self, rev):
1602 def iscensored(self, rev):
1604 """Check if a file revision is censored."""
1603 """Check if a file revision is censored."""
1605 return False
1604 return False
1606
1605
1607 def _peek_iscensored(self, baserev, delta, flush):
1606 def _peek_iscensored(self, baserev, delta, flush):
1608 """Quickly check if a delta produces a censored revision."""
1607 """Quickly check if a delta produces a censored revision."""
1609 return False
1608 return False
1610
1609
1611 def getstrippoint(self, minlink):
1610 def getstrippoint(self, minlink):
1612 """find the minimum rev that must be stripped to strip the linkrev
1611 """find the minimum rev that must be stripped to strip the linkrev
1613
1612
1614 Returns a tuple containing the minimum rev and a set of all revs that
1613 Returns a tuple containing the minimum rev and a set of all revs that
1615 have linkrevs that will be broken by this strip.
1614 have linkrevs that will be broken by this strip.
1616 """
1615 """
1617 brokenrevs = set()
1616 brokenrevs = set()
1618 strippoint = len(self)
1617 strippoint = len(self)
1619
1618
1620 heads = {}
1619 heads = {}
1621 futurelargelinkrevs = set()
1620 futurelargelinkrevs = set()
1622 for head in self.headrevs():
1621 for head in self.headrevs():
1623 headlinkrev = self.linkrev(head)
1622 headlinkrev = self.linkrev(head)
1624 heads[head] = headlinkrev
1623 heads[head] = headlinkrev
1625 if headlinkrev >= minlink:
1624 if headlinkrev >= minlink:
1626 futurelargelinkrevs.add(headlinkrev)
1625 futurelargelinkrevs.add(headlinkrev)
1627
1626
1628 # This algorithm involves walking down the rev graph, starting at the
1627 # This algorithm involves walking down the rev graph, starting at the
1629 # heads. Since the revs are topologically sorted according to linkrev,
1628 # heads. Since the revs are topologically sorted according to linkrev,
1630 # once all head linkrevs are below the minlink, we know there are
1629 # once all head linkrevs are below the minlink, we know there are
1631 # no more revs that could have a linkrev greater than minlink.
1630 # no more revs that could have a linkrev greater than minlink.
1632 # So we can stop walking.
1631 # So we can stop walking.
1633 while futurelargelinkrevs:
1632 while futurelargelinkrevs:
1634 strippoint -= 1
1633 strippoint -= 1
1635 linkrev = heads.pop(strippoint)
1634 linkrev = heads.pop(strippoint)
1636
1635
1637 if linkrev < minlink:
1636 if linkrev < minlink:
1638 brokenrevs.add(strippoint)
1637 brokenrevs.add(strippoint)
1639 else:
1638 else:
1640 futurelargelinkrevs.remove(linkrev)
1639 futurelargelinkrevs.remove(linkrev)
1641
1640
1642 for p in self.parentrevs(strippoint):
1641 for p in self.parentrevs(strippoint):
1643 if p != nullrev:
1642 if p != nullrev:
1644 plinkrev = self.linkrev(p)
1643 plinkrev = self.linkrev(p)
1645 heads[p] = plinkrev
1644 heads[p] = plinkrev
1646 if plinkrev >= minlink:
1645 if plinkrev >= minlink:
1647 futurelargelinkrevs.add(plinkrev)
1646 futurelargelinkrevs.add(plinkrev)
1648
1647
1649 return strippoint, brokenrevs
1648 return strippoint, brokenrevs
1650
1649
1651 def strip(self, minlink, transaction):
1650 def strip(self, minlink, transaction):
1652 """truncate the revlog on the first revision with a linkrev >= minlink
1651 """truncate the revlog on the first revision with a linkrev >= minlink
1653
1652
1654 This function is called when we're stripping revision minlink and
1653 This function is called when we're stripping revision minlink and
1655 its descendants from the repository.
1654 its descendants from the repository.
1656
1655
1657 We have to remove all revisions with linkrev >= minlink, because
1656 We have to remove all revisions with linkrev >= minlink, because
1658 the equivalent changelog revisions will be renumbered after the
1657 the equivalent changelog revisions will be renumbered after the
1659 strip.
1658 strip.
1660
1659
1661 So we truncate the revlog on the first of these revisions, and
1660 So we truncate the revlog on the first of these revisions, and
1662 trust that the caller has saved the revisions that shouldn't be
1661 trust that the caller has saved the revisions that shouldn't be
1663 removed and that it'll re-add them after this truncation.
1662 removed and that it'll re-add them after this truncation.
1664 """
1663 """
1665 if len(self) == 0:
1664 if len(self) == 0:
1666 return
1665 return
1667
1666
1668 rev, _ = self.getstrippoint(minlink)
1667 rev, _ = self.getstrippoint(minlink)
1669 if rev == len(self):
1668 if rev == len(self):
1670 return
1669 return
1671
1670
1672 # first truncate the files on disk
1671 # first truncate the files on disk
1673 end = self.start(rev)
1672 end = self.start(rev)
1674 if not self._inline:
1673 if not self._inline:
1675 transaction.add(self.datafile, end)
1674 transaction.add(self.datafile, end)
1676 end = rev * self._io.size
1675 end = rev * self._io.size
1677 else:
1676 else:
1678 end += rev * self._io.size
1677 end += rev * self._io.size
1679
1678
1680 transaction.add(self.indexfile, end)
1679 transaction.add(self.indexfile, end)
1681
1680
1682 # then reset internal state in memory to forget those revisions
1681 # then reset internal state in memory to forget those revisions
1683 self._cache = None
1682 self._cache = None
1684 self._chaininfocache = {}
1683 self._chaininfocache = {}
1685 self._chunkclear()
1684 self._chunkclear()
1686 for x in xrange(rev, len(self)):
1685 for x in xrange(rev, len(self)):
1687 del self.nodemap[self.node(x)]
1686 del self.nodemap[self.node(x)]
1688
1687
1689 del self.index[rev:-1]
1688 del self.index[rev:-1]
1690
1689
1691 def checksize(self):
1690 def checksize(self):
1692 expected = 0
1691 expected = 0
1693 if len(self):
1692 if len(self):
1694 expected = max(0, self.end(len(self) - 1))
1693 expected = max(0, self.end(len(self) - 1))
1695
1694
1696 try:
1695 try:
1697 f = self.opener(self.datafile)
1696 f = self.opener(self.datafile)
1698 f.seek(0, 2)
1697 f.seek(0, 2)
1699 actual = f.tell()
1698 actual = f.tell()
1700 f.close()
1699 f.close()
1701 dd = actual - expected
1700 dd = actual - expected
1702 except IOError as inst:
1701 except IOError as inst:
1703 if inst.errno != errno.ENOENT:
1702 if inst.errno != errno.ENOENT:
1704 raise
1703 raise
1705 dd = 0
1704 dd = 0
1706
1705
1707 try:
1706 try:
1708 f = self.opener(self.indexfile)
1707 f = self.opener(self.indexfile)
1709 f.seek(0, 2)
1708 f.seek(0, 2)
1710 actual = f.tell()
1709 actual = f.tell()
1711 f.close()
1710 f.close()
1712 s = self._io.size
1711 s = self._io.size
1713 i = max(0, actual // s)
1712 i = max(0, actual // s)
1714 di = actual - (i * s)
1713 di = actual - (i * s)
1715 if self._inline:
1714 if self._inline:
1716 databytes = 0
1715 databytes = 0
1717 for r in self:
1716 for r in self:
1718 databytes += max(0, self.length(r))
1717 databytes += max(0, self.length(r))
1719 dd = 0
1718 dd = 0
1720 di = actual - len(self) * s - databytes
1719 di = actual - len(self) * s - databytes
1721 except IOError as inst:
1720 except IOError as inst:
1722 if inst.errno != errno.ENOENT:
1721 if inst.errno != errno.ENOENT:
1723 raise
1722 raise
1724 di = 0
1723 di = 0
1725
1724
1726 return (dd, di)
1725 return (dd, di)
1727
1726
1728 def files(self):
1727 def files(self):
1729 res = [self.indexfile]
1728 res = [self.indexfile]
1730 if not self._inline:
1729 if not self._inline:
1731 res.append(self.datafile)
1730 res.append(self.datafile)
1732 return res
1731 return res
@@ -1,725 +1,725
1
1
2 $ cat << EOF >> $HGRCPATH
2 $ cat << EOF >> $HGRCPATH
3 > [format]
3 > [format]
4 > usegeneraldelta=yes
4 > usegeneraldelta=yes
5 > EOF
5 > EOF
6
6
7 Setting up test
7 Setting up test
8
8
9 $ hg init test
9 $ hg init test
10 $ cd test
10 $ cd test
11 $ echo 0 > afile
11 $ echo 0 > afile
12 $ hg add afile
12 $ hg add afile
13 $ hg commit -m "0.0"
13 $ hg commit -m "0.0"
14 $ echo 1 >> afile
14 $ echo 1 >> afile
15 $ hg commit -m "0.1"
15 $ hg commit -m "0.1"
16 $ echo 2 >> afile
16 $ echo 2 >> afile
17 $ hg commit -m "0.2"
17 $ hg commit -m "0.2"
18 $ echo 3 >> afile
18 $ echo 3 >> afile
19 $ hg commit -m "0.3"
19 $ hg commit -m "0.3"
20 $ hg update -C 0
20 $ hg update -C 0
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 $ echo 1 >> afile
22 $ echo 1 >> afile
23 $ hg commit -m "1.1"
23 $ hg commit -m "1.1"
24 created new head
24 created new head
25 $ echo 2 >> afile
25 $ echo 2 >> afile
26 $ hg commit -m "1.2"
26 $ hg commit -m "1.2"
27 $ echo "a line" > fred
27 $ echo "a line" > fred
28 $ echo 3 >> afile
28 $ echo 3 >> afile
29 $ hg add fred
29 $ hg add fred
30 $ hg commit -m "1.3"
30 $ hg commit -m "1.3"
31 $ hg mv afile adifferentfile
31 $ hg mv afile adifferentfile
32 $ hg commit -m "1.3m"
32 $ hg commit -m "1.3m"
33 $ hg update -C 3
33 $ hg update -C 3
34 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
34 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
35 $ hg mv afile anotherfile
35 $ hg mv afile anotherfile
36 $ hg commit -m "0.3m"
36 $ hg commit -m "0.3m"
37 $ hg verify
37 $ hg verify
38 checking changesets
38 checking changesets
39 checking manifests
39 checking manifests
40 crosschecking files in changesets and manifests
40 crosschecking files in changesets and manifests
41 checking files
41 checking files
42 4 files, 9 changesets, 7 total revisions
42 4 files, 9 changesets, 7 total revisions
43 $ cd ..
43 $ cd ..
44 $ hg init empty
44 $ hg init empty
45
45
46 Bundle and phase
46 Bundle and phase
47
47
48 $ hg -R test phase --force --secret 0
48 $ hg -R test phase --force --secret 0
49 $ hg -R test bundle phase.hg empty
49 $ hg -R test bundle phase.hg empty
50 searching for changes
50 searching for changes
51 no changes found (ignored 9 secret changesets)
51 no changes found (ignored 9 secret changesets)
52 [1]
52 [1]
53 $ hg -R test phase --draft -r 'head()'
53 $ hg -R test phase --draft -r 'head()'
54
54
55 Bundle --all
55 Bundle --all
56
56
57 $ hg -R test bundle --all all.hg
57 $ hg -R test bundle --all all.hg
58 9 changesets found
58 9 changesets found
59
59
60 Bundle test to full.hg
60 Bundle test to full.hg
61
61
62 $ hg -R test bundle full.hg empty
62 $ hg -R test bundle full.hg empty
63 searching for changes
63 searching for changes
64 9 changesets found
64 9 changesets found
65
65
66 Unbundle full.hg in test
66 Unbundle full.hg in test
67
67
68 $ hg -R test unbundle full.hg
68 $ hg -R test unbundle full.hg
69 adding changesets
69 adding changesets
70 adding manifests
70 adding manifests
71 adding file changes
71 adding file changes
72 added 0 changesets with 0 changes to 4 files
72 added 0 changesets with 0 changes to 4 files
73 (run 'hg update' to get a working copy)
73 (run 'hg update' to get a working copy)
74
74
75 Verify empty
75 Verify empty
76
76
77 $ hg -R empty heads
77 $ hg -R empty heads
78 [1]
78 [1]
79 $ hg -R empty verify
79 $ hg -R empty verify
80 checking changesets
80 checking changesets
81 checking manifests
81 checking manifests
82 crosschecking files in changesets and manifests
82 crosschecking files in changesets and manifests
83 checking files
83 checking files
84 0 files, 0 changesets, 0 total revisions
84 0 files, 0 changesets, 0 total revisions
85
85
86 Pull full.hg into test (using --cwd)
86 Pull full.hg into test (using --cwd)
87
87
88 $ hg --cwd test pull ../full.hg
88 $ hg --cwd test pull ../full.hg
89 pulling from ../full.hg
89 pulling from ../full.hg
90 searching for changes
90 searching for changes
91 no changes found
91 no changes found
92
92
93 Verify that there are no leaked temporary files after pull (issue2797)
93 Verify that there are no leaked temporary files after pull (issue2797)
94
94
95 $ ls test/.hg | grep .hg10un
95 $ ls test/.hg | grep .hg10un
96 [1]
96 [1]
97
97
98 Pull full.hg into empty (using --cwd)
98 Pull full.hg into empty (using --cwd)
99
99
100 $ hg --cwd empty pull ../full.hg
100 $ hg --cwd empty pull ../full.hg
101 pulling from ../full.hg
101 pulling from ../full.hg
102 requesting all changes
102 requesting all changes
103 adding changesets
103 adding changesets
104 adding manifests
104 adding manifests
105 adding file changes
105 adding file changes
106 added 9 changesets with 7 changes to 4 files (+1 heads)
106 added 9 changesets with 7 changes to 4 files (+1 heads)
107 (run 'hg heads' to see heads, 'hg merge' to merge)
107 (run 'hg heads' to see heads, 'hg merge' to merge)
108
108
109 Rollback empty
109 Rollback empty
110
110
111 $ hg -R empty rollback
111 $ hg -R empty rollback
112 repository tip rolled back to revision -1 (undo pull)
112 repository tip rolled back to revision -1 (undo pull)
113
113
114 Pull full.hg into empty again (using --cwd)
114 Pull full.hg into empty again (using --cwd)
115
115
116 $ hg --cwd empty pull ../full.hg
116 $ hg --cwd empty pull ../full.hg
117 pulling from ../full.hg
117 pulling from ../full.hg
118 requesting all changes
118 requesting all changes
119 adding changesets
119 adding changesets
120 adding manifests
120 adding manifests
121 adding file changes
121 adding file changes
122 added 9 changesets with 7 changes to 4 files (+1 heads)
122 added 9 changesets with 7 changes to 4 files (+1 heads)
123 (run 'hg heads' to see heads, 'hg merge' to merge)
123 (run 'hg heads' to see heads, 'hg merge' to merge)
124
124
125 Pull full.hg into test (using -R)
125 Pull full.hg into test (using -R)
126
126
127 $ hg -R test pull full.hg
127 $ hg -R test pull full.hg
128 pulling from full.hg
128 pulling from full.hg
129 searching for changes
129 searching for changes
130 no changes found
130 no changes found
131
131
132 Pull full.hg into empty (using -R)
132 Pull full.hg into empty (using -R)
133
133
134 $ hg -R empty pull full.hg
134 $ hg -R empty pull full.hg
135 pulling from full.hg
135 pulling from full.hg
136 searching for changes
136 searching for changes
137 no changes found
137 no changes found
138
138
139 Rollback empty
139 Rollback empty
140
140
141 $ hg -R empty rollback
141 $ hg -R empty rollback
142 repository tip rolled back to revision -1 (undo pull)
142 repository tip rolled back to revision -1 (undo pull)
143
143
144 Pull full.hg into empty again (using -R)
144 Pull full.hg into empty again (using -R)
145
145
146 $ hg -R empty pull full.hg
146 $ hg -R empty pull full.hg
147 pulling from full.hg
147 pulling from full.hg
148 requesting all changes
148 requesting all changes
149 adding changesets
149 adding changesets
150 adding manifests
150 adding manifests
151 adding file changes
151 adding file changes
152 added 9 changesets with 7 changes to 4 files (+1 heads)
152 added 9 changesets with 7 changes to 4 files (+1 heads)
153 (run 'hg heads' to see heads, 'hg merge' to merge)
153 (run 'hg heads' to see heads, 'hg merge' to merge)
154
154
155 Log -R full.hg in fresh empty
155 Log -R full.hg in fresh empty
156
156
157 $ rm -r empty
157 $ rm -r empty
158 $ hg init empty
158 $ hg init empty
159 $ cd empty
159 $ cd empty
160 $ hg -R bundle://../full.hg log
160 $ hg -R bundle://../full.hg log
161 changeset: 8:aa35859c02ea
161 changeset: 8:aa35859c02ea
162 tag: tip
162 tag: tip
163 parent: 3:eebf5a27f8ca
163 parent: 3:eebf5a27f8ca
164 user: test
164 user: test
165 date: Thu Jan 01 00:00:00 1970 +0000
165 date: Thu Jan 01 00:00:00 1970 +0000
166 summary: 0.3m
166 summary: 0.3m
167
167
168 changeset: 7:a6a34bfa0076
168 changeset: 7:a6a34bfa0076
169 user: test
169 user: test
170 date: Thu Jan 01 00:00:00 1970 +0000
170 date: Thu Jan 01 00:00:00 1970 +0000
171 summary: 1.3m
171 summary: 1.3m
172
172
173 changeset: 6:7373c1169842
173 changeset: 6:7373c1169842
174 user: test
174 user: test
175 date: Thu Jan 01 00:00:00 1970 +0000
175 date: Thu Jan 01 00:00:00 1970 +0000
176 summary: 1.3
176 summary: 1.3
177
177
178 changeset: 5:1bb50a9436a7
178 changeset: 5:1bb50a9436a7
179 user: test
179 user: test
180 date: Thu Jan 01 00:00:00 1970 +0000
180 date: Thu Jan 01 00:00:00 1970 +0000
181 summary: 1.2
181 summary: 1.2
182
182
183 changeset: 4:095197eb4973
183 changeset: 4:095197eb4973
184 parent: 0:f9ee2f85a263
184 parent: 0:f9ee2f85a263
185 user: test
185 user: test
186 date: Thu Jan 01 00:00:00 1970 +0000
186 date: Thu Jan 01 00:00:00 1970 +0000
187 summary: 1.1
187 summary: 1.1
188
188
189 changeset: 3:eebf5a27f8ca
189 changeset: 3:eebf5a27f8ca
190 user: test
190 user: test
191 date: Thu Jan 01 00:00:00 1970 +0000
191 date: Thu Jan 01 00:00:00 1970 +0000
192 summary: 0.3
192 summary: 0.3
193
193
194 changeset: 2:e38ba6f5b7e0
194 changeset: 2:e38ba6f5b7e0
195 user: test
195 user: test
196 date: Thu Jan 01 00:00:00 1970 +0000
196 date: Thu Jan 01 00:00:00 1970 +0000
197 summary: 0.2
197 summary: 0.2
198
198
199 changeset: 1:34c2bf6b0626
199 changeset: 1:34c2bf6b0626
200 user: test
200 user: test
201 date: Thu Jan 01 00:00:00 1970 +0000
201 date: Thu Jan 01 00:00:00 1970 +0000
202 summary: 0.1
202 summary: 0.1
203
203
204 changeset: 0:f9ee2f85a263
204 changeset: 0:f9ee2f85a263
205 user: test
205 user: test
206 date: Thu Jan 01 00:00:00 1970 +0000
206 date: Thu Jan 01 00:00:00 1970 +0000
207 summary: 0.0
207 summary: 0.0
208
208
209 Make sure bundlerepo doesn't leak tempfiles (issue2491)
209 Make sure bundlerepo doesn't leak tempfiles (issue2491)
210
210
211 $ ls .hg
211 $ ls .hg
212 00changelog.i
212 00changelog.i
213 cache
213 cache
214 requires
214 requires
215 store
215 store
216
216
217 Pull ../full.hg into empty (with hook)
217 Pull ../full.hg into empty (with hook)
218
218
219 $ echo "[hooks]" >> .hg/hgrc
219 $ echo "[hooks]" >> .hg/hgrc
220 $ echo "changegroup = printenv.py changegroup" >> .hg/hgrc
220 $ echo "changegroup = printenv.py changegroup" >> .hg/hgrc
221
221
222 doesn't work (yet ?)
222 doesn't work (yet ?)
223
223
224 hg -R bundle://../full.hg verify
224 hg -R bundle://../full.hg verify
225
225
226 $ hg pull bundle://../full.hg
226 $ hg pull bundle://../full.hg
227 pulling from bundle:../full.hg
227 pulling from bundle:../full.hg
228 requesting all changes
228 requesting all changes
229 adding changesets
229 adding changesets
230 adding manifests
230 adding manifests
231 adding file changes
231 adding file changes
232 added 9 changesets with 7 changes to 4 files (+1 heads)
232 added 9 changesets with 7 changes to 4 files (+1 heads)
233 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=bundle:../full.hg (glob)
233 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=bundle:../full.hg (glob)
234 (run 'hg heads' to see heads, 'hg merge' to merge)
234 (run 'hg heads' to see heads, 'hg merge' to merge)
235
235
236 Rollback empty
236 Rollback empty
237
237
238 $ hg rollback
238 $ hg rollback
239 repository tip rolled back to revision -1 (undo pull)
239 repository tip rolled back to revision -1 (undo pull)
240 $ cd ..
240 $ cd ..
241
241
242 Log -R bundle:empty+full.hg
242 Log -R bundle:empty+full.hg
243
243
244 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
244 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
245 8 7 6 5 4 3 2 1 0
245 8 7 6 5 4 3 2 1 0
246
246
247 Pull full.hg into empty again (using -R; with hook)
247 Pull full.hg into empty again (using -R; with hook)
248
248
249 $ hg -R empty pull full.hg
249 $ hg -R empty pull full.hg
250 pulling from full.hg
250 pulling from full.hg
251 requesting all changes
251 requesting all changes
252 adding changesets
252 adding changesets
253 adding manifests
253 adding manifests
254 adding file changes
254 adding file changes
255 added 9 changesets with 7 changes to 4 files (+1 heads)
255 added 9 changesets with 7 changes to 4 files (+1 heads)
256 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=bundle:empty+full.hg (glob)
256 changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=bundle:empty+full.hg (glob)
257 (run 'hg heads' to see heads, 'hg merge' to merge)
257 (run 'hg heads' to see heads, 'hg merge' to merge)
258
258
259 Cannot produce streaming clone bundles with "hg bundle"
259 Cannot produce streaming clone bundles with "hg bundle"
260
260
261 $ hg -R test bundle -t packed1 packed.hg
261 $ hg -R test bundle -t packed1 packed.hg
262 abort: packed bundles cannot be produced by "hg bundle"
262 abort: packed bundles cannot be produced by "hg bundle"
263 (use "hg debugcreatestreamclonebundle")
263 (use "hg debugcreatestreamclonebundle")
264 [255]
264 [255]
265
265
266 packed1 is produced properly
266 packed1 is produced properly
267
267
268 $ hg -R test debugcreatestreamclonebundle packed.hg
268 $ hg -R test debugcreatestreamclonebundle packed.hg
269 writing 2663 bytes for 6 files
269 writing 2667 bytes for 6 files
270 bundle requirements: generaldelta, revlogv1
270 bundle requirements: generaldelta, revlogv1
271
271
272 $ f -B 64 --size --sha1 --hexdump packed.hg
272 $ f -B 64 --size --sha1 --hexdump packed.hg
273 packed.hg: size=2826, sha1=e139f97692a142b19cdcff64a69697d5307ce6d4
273 packed.hg: size=2830, sha1=c28255110a88ffa52ddc44985cad295b1ab349bc
274 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
274 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
275 0010: 00 00 00 00 0a 67 00 16 67 65 6e 65 72 61 6c 64 |.....g..generald|
275 0010: 00 00 00 00 0a 6b 00 16 67 65 6e 65 72 61 6c 64 |.....k..generald|
276 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
276 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
277 0030: 74 61 2f 61 64 69 66 66 65 72 65 6e 74 66 69 6c |ta/adifferentfil|
277 0030: 74 61 2f 61 64 69 66 66 65 72 65 6e 74 66 69 6c |ta/adifferentfil|
278
278
279 generaldelta requirement is listed in stream clone bundles
279 generaldelta requirement is listed in stream clone bundles
280
280
281 $ hg --config format.generaldelta=true init testgd
281 $ hg --config format.generaldelta=true init testgd
282 $ cd testgd
282 $ cd testgd
283 $ touch foo
283 $ touch foo
284 $ hg -q commit -A -m initial
284 $ hg -q commit -A -m initial
285 $ cd ..
285 $ cd ..
286 $ hg -R testgd debugcreatestreamclonebundle packedgd.hg
286 $ hg -R testgd debugcreatestreamclonebundle packedgd.hg
287 writing 301 bytes for 3 files
287 writing 301 bytes for 3 files
288 bundle requirements: generaldelta, revlogv1
288 bundle requirements: generaldelta, revlogv1
289
289
290 $ f -B 64 --size --sha1 --hexdump packedgd.hg
290 $ f -B 64 --size --sha1 --hexdump packedgd.hg
291 packedgd.hg: size=396, sha1=981f9e589799335304a5a9a44caa3623a48d2a9f
291 packedgd.hg: size=396, sha1=981f9e589799335304a5a9a44caa3623a48d2a9f
292 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
292 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
293 0010: 00 00 00 00 01 2d 00 16 67 65 6e 65 72 61 6c 64 |.....-..generald|
293 0010: 00 00 00 00 01 2d 00 16 67 65 6e 65 72 61 6c 64 |.....-..generald|
294 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
294 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
295 0030: 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 03 00 01 |ta/foo.i.64.....|
295 0030: 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 03 00 01 |ta/foo.i.64.....|
296
296
297 Unpacking packed1 bundles with "hg unbundle" isn't allowed
297 Unpacking packed1 bundles with "hg unbundle" isn't allowed
298
298
299 $ hg init packed
299 $ hg init packed
300 $ hg -R packed unbundle packed.hg
300 $ hg -R packed unbundle packed.hg
301 abort: packed bundles cannot be applied with "hg unbundle"
301 abort: packed bundles cannot be applied with "hg unbundle"
302 (use "hg debugapplystreamclonebundle")
302 (use "hg debugapplystreamclonebundle")
303 [255]
303 [255]
304
304
305 packed1 can be consumed from debug command
305 packed1 can be consumed from debug command
306
306
307 $ hg -R packed debugapplystreamclonebundle packed.hg
307 $ hg -R packed debugapplystreamclonebundle packed.hg
308 6 files to transfer, 2.60 KB of data
308 6 files to transfer, 2.60 KB of data
309 transferred 2.60 KB in *.* seconds (* */sec) (glob)
309 transferred 2.60 KB in *.* seconds (* */sec) (glob)
310
310
311 Does not work on non-empty repo
311 Does not work on non-empty repo
312
312
313 $ hg -R packed debugapplystreamclonebundle packed.hg
313 $ hg -R packed debugapplystreamclonebundle packed.hg
314 abort: cannot apply stream clone bundle on non-empty repo
314 abort: cannot apply stream clone bundle on non-empty repo
315 [255]
315 [255]
316
316
317 Create partial clones
317 Create partial clones
318
318
319 $ rm -r empty
319 $ rm -r empty
320 $ hg init empty
320 $ hg init empty
321 $ hg clone -r 3 test partial
321 $ hg clone -r 3 test partial
322 adding changesets
322 adding changesets
323 adding manifests
323 adding manifests
324 adding file changes
324 adding file changes
325 added 4 changesets with 4 changes to 1 files
325 added 4 changesets with 4 changes to 1 files
326 updating to branch default
326 updating to branch default
327 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
327 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
328 $ hg clone partial partial2
328 $ hg clone partial partial2
329 updating to branch default
329 updating to branch default
330 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
330 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
331 $ cd partial
331 $ cd partial
332
332
333 Log -R full.hg in partial
333 Log -R full.hg in partial
334
334
335 $ hg -R bundle://../full.hg log -T phases
335 $ hg -R bundle://../full.hg log -T phases
336 changeset: 8:aa35859c02ea
336 changeset: 8:aa35859c02ea
337 tag: tip
337 tag: tip
338 phase: draft
338 phase: draft
339 parent: 3:eebf5a27f8ca
339 parent: 3:eebf5a27f8ca
340 user: test
340 user: test
341 date: Thu Jan 01 00:00:00 1970 +0000
341 date: Thu Jan 01 00:00:00 1970 +0000
342 summary: 0.3m
342 summary: 0.3m
343
343
344 changeset: 7:a6a34bfa0076
344 changeset: 7:a6a34bfa0076
345 phase: draft
345 phase: draft
346 user: test
346 user: test
347 date: Thu Jan 01 00:00:00 1970 +0000
347 date: Thu Jan 01 00:00:00 1970 +0000
348 summary: 1.3m
348 summary: 1.3m
349
349
350 changeset: 6:7373c1169842
350 changeset: 6:7373c1169842
351 phase: draft
351 phase: draft
352 user: test
352 user: test
353 date: Thu Jan 01 00:00:00 1970 +0000
353 date: Thu Jan 01 00:00:00 1970 +0000
354 summary: 1.3
354 summary: 1.3
355
355
356 changeset: 5:1bb50a9436a7
356 changeset: 5:1bb50a9436a7
357 phase: draft
357 phase: draft
358 user: test
358 user: test
359 date: Thu Jan 01 00:00:00 1970 +0000
359 date: Thu Jan 01 00:00:00 1970 +0000
360 summary: 1.2
360 summary: 1.2
361
361
362 changeset: 4:095197eb4973
362 changeset: 4:095197eb4973
363 phase: draft
363 phase: draft
364 parent: 0:f9ee2f85a263
364 parent: 0:f9ee2f85a263
365 user: test
365 user: test
366 date: Thu Jan 01 00:00:00 1970 +0000
366 date: Thu Jan 01 00:00:00 1970 +0000
367 summary: 1.1
367 summary: 1.1
368
368
369 changeset: 3:eebf5a27f8ca
369 changeset: 3:eebf5a27f8ca
370 phase: public
370 phase: public
371 user: test
371 user: test
372 date: Thu Jan 01 00:00:00 1970 +0000
372 date: Thu Jan 01 00:00:00 1970 +0000
373 summary: 0.3
373 summary: 0.3
374
374
375 changeset: 2:e38ba6f5b7e0
375 changeset: 2:e38ba6f5b7e0
376 phase: public
376 phase: public
377 user: test
377 user: test
378 date: Thu Jan 01 00:00:00 1970 +0000
378 date: Thu Jan 01 00:00:00 1970 +0000
379 summary: 0.2
379 summary: 0.2
380
380
381 changeset: 1:34c2bf6b0626
381 changeset: 1:34c2bf6b0626
382 phase: public
382 phase: public
383 user: test
383 user: test
384 date: Thu Jan 01 00:00:00 1970 +0000
384 date: Thu Jan 01 00:00:00 1970 +0000
385 summary: 0.1
385 summary: 0.1
386
386
387 changeset: 0:f9ee2f85a263
387 changeset: 0:f9ee2f85a263
388 phase: public
388 phase: public
389 user: test
389 user: test
390 date: Thu Jan 01 00:00:00 1970 +0000
390 date: Thu Jan 01 00:00:00 1970 +0000
391 summary: 0.0
391 summary: 0.0
392
392
393
393
394 Incoming full.hg in partial
394 Incoming full.hg in partial
395
395
396 $ hg incoming bundle://../full.hg
396 $ hg incoming bundle://../full.hg
397 comparing with bundle:../full.hg
397 comparing with bundle:../full.hg
398 searching for changes
398 searching for changes
399 changeset: 4:095197eb4973
399 changeset: 4:095197eb4973
400 parent: 0:f9ee2f85a263
400 parent: 0:f9ee2f85a263
401 user: test
401 user: test
402 date: Thu Jan 01 00:00:00 1970 +0000
402 date: Thu Jan 01 00:00:00 1970 +0000
403 summary: 1.1
403 summary: 1.1
404
404
405 changeset: 5:1bb50a9436a7
405 changeset: 5:1bb50a9436a7
406 user: test
406 user: test
407 date: Thu Jan 01 00:00:00 1970 +0000
407 date: Thu Jan 01 00:00:00 1970 +0000
408 summary: 1.2
408 summary: 1.2
409
409
410 changeset: 6:7373c1169842
410 changeset: 6:7373c1169842
411 user: test
411 user: test
412 date: Thu Jan 01 00:00:00 1970 +0000
412 date: Thu Jan 01 00:00:00 1970 +0000
413 summary: 1.3
413 summary: 1.3
414
414
415 changeset: 7:a6a34bfa0076
415 changeset: 7:a6a34bfa0076
416 user: test
416 user: test
417 date: Thu Jan 01 00:00:00 1970 +0000
417 date: Thu Jan 01 00:00:00 1970 +0000
418 summary: 1.3m
418 summary: 1.3m
419
419
420 changeset: 8:aa35859c02ea
420 changeset: 8:aa35859c02ea
421 tag: tip
421 tag: tip
422 parent: 3:eebf5a27f8ca
422 parent: 3:eebf5a27f8ca
423 user: test
423 user: test
424 date: Thu Jan 01 00:00:00 1970 +0000
424 date: Thu Jan 01 00:00:00 1970 +0000
425 summary: 0.3m
425 summary: 0.3m
426
426
427
427
428 Outgoing -R full.hg vs partial2 in partial
428 Outgoing -R full.hg vs partial2 in partial
429
429
430 $ hg -R bundle://../full.hg outgoing ../partial2
430 $ hg -R bundle://../full.hg outgoing ../partial2
431 comparing with ../partial2
431 comparing with ../partial2
432 searching for changes
432 searching for changes
433 changeset: 4:095197eb4973
433 changeset: 4:095197eb4973
434 parent: 0:f9ee2f85a263
434 parent: 0:f9ee2f85a263
435 user: test
435 user: test
436 date: Thu Jan 01 00:00:00 1970 +0000
436 date: Thu Jan 01 00:00:00 1970 +0000
437 summary: 1.1
437 summary: 1.1
438
438
439 changeset: 5:1bb50a9436a7
439 changeset: 5:1bb50a9436a7
440 user: test
440 user: test
441 date: Thu Jan 01 00:00:00 1970 +0000
441 date: Thu Jan 01 00:00:00 1970 +0000
442 summary: 1.2
442 summary: 1.2
443
443
444 changeset: 6:7373c1169842
444 changeset: 6:7373c1169842
445 user: test
445 user: test
446 date: Thu Jan 01 00:00:00 1970 +0000
446 date: Thu Jan 01 00:00:00 1970 +0000
447 summary: 1.3
447 summary: 1.3
448
448
449 changeset: 7:a6a34bfa0076
449 changeset: 7:a6a34bfa0076
450 user: test
450 user: test
451 date: Thu Jan 01 00:00:00 1970 +0000
451 date: Thu Jan 01 00:00:00 1970 +0000
452 summary: 1.3m
452 summary: 1.3m
453
453
454 changeset: 8:aa35859c02ea
454 changeset: 8:aa35859c02ea
455 tag: tip
455 tag: tip
456 parent: 3:eebf5a27f8ca
456 parent: 3:eebf5a27f8ca
457 user: test
457 user: test
458 date: Thu Jan 01 00:00:00 1970 +0000
458 date: Thu Jan 01 00:00:00 1970 +0000
459 summary: 0.3m
459 summary: 0.3m
460
460
461
461
462 Outgoing -R does-not-exist.hg vs partial2 in partial
462 Outgoing -R does-not-exist.hg vs partial2 in partial
463
463
464 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
464 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
465 abort: *../does-not-exist.hg* (glob)
465 abort: *../does-not-exist.hg* (glob)
466 [255]
466 [255]
467 $ cd ..
467 $ cd ..
468
468
469 hide outer repo
469 hide outer repo
470 $ hg init
470 $ hg init
471
471
472 Direct clone from bundle (all-history)
472 Direct clone from bundle (all-history)
473
473
474 $ hg clone full.hg full-clone
474 $ hg clone full.hg full-clone
475 requesting all changes
475 requesting all changes
476 adding changesets
476 adding changesets
477 adding manifests
477 adding manifests
478 adding file changes
478 adding file changes
479 added 9 changesets with 7 changes to 4 files (+1 heads)
479 added 9 changesets with 7 changes to 4 files (+1 heads)
480 updating to branch default
480 updating to branch default
481 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
481 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
482 $ hg -R full-clone heads
482 $ hg -R full-clone heads
483 changeset: 8:aa35859c02ea
483 changeset: 8:aa35859c02ea
484 tag: tip
484 tag: tip
485 parent: 3:eebf5a27f8ca
485 parent: 3:eebf5a27f8ca
486 user: test
486 user: test
487 date: Thu Jan 01 00:00:00 1970 +0000
487 date: Thu Jan 01 00:00:00 1970 +0000
488 summary: 0.3m
488 summary: 0.3m
489
489
490 changeset: 7:a6a34bfa0076
490 changeset: 7:a6a34bfa0076
491 user: test
491 user: test
492 date: Thu Jan 01 00:00:00 1970 +0000
492 date: Thu Jan 01 00:00:00 1970 +0000
493 summary: 1.3m
493 summary: 1.3m
494
494
495 $ rm -r full-clone
495 $ rm -r full-clone
496
496
497 When cloning from a non-copiable repository into '', do not
497 When cloning from a non-copiable repository into '', do not
498 recurse infinitely (issue2528)
498 recurse infinitely (issue2528)
499
499
500 $ hg clone full.hg ''
500 $ hg clone full.hg ''
501 abort: empty destination path is not valid
501 abort: empty destination path is not valid
502 [255]
502 [255]
503
503
504 test for https://bz.mercurial-scm.org/216
504 test for https://bz.mercurial-scm.org/216
505
505
506 Unbundle incremental bundles into fresh empty in one go
506 Unbundle incremental bundles into fresh empty in one go
507
507
508 $ rm -r empty
508 $ rm -r empty
509 $ hg init empty
509 $ hg init empty
510 $ hg -R test bundle --base null -r 0 ../0.hg
510 $ hg -R test bundle --base null -r 0 ../0.hg
511 1 changesets found
511 1 changesets found
512 $ hg -R test bundle --base 0 -r 1 ../1.hg
512 $ hg -R test bundle --base 0 -r 1 ../1.hg
513 1 changesets found
513 1 changesets found
514 $ hg -R empty unbundle -u ../0.hg ../1.hg
514 $ hg -R empty unbundle -u ../0.hg ../1.hg
515 adding changesets
515 adding changesets
516 adding manifests
516 adding manifests
517 adding file changes
517 adding file changes
518 added 1 changesets with 1 changes to 1 files
518 added 1 changesets with 1 changes to 1 files
519 adding changesets
519 adding changesets
520 adding manifests
520 adding manifests
521 adding file changes
521 adding file changes
522 added 1 changesets with 1 changes to 1 files
522 added 1 changesets with 1 changes to 1 files
523 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
523 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
524
524
525 View full contents of the bundle
525 View full contents of the bundle
526 $ hg -R test bundle --base null -r 3 ../partial.hg
526 $ hg -R test bundle --base null -r 3 ../partial.hg
527 4 changesets found
527 4 changesets found
528 $ cd test
528 $ cd test
529 $ hg -R ../../partial.hg log -r "bundle()"
529 $ hg -R ../../partial.hg log -r "bundle()"
530 changeset: 0:f9ee2f85a263
530 changeset: 0:f9ee2f85a263
531 user: test
531 user: test
532 date: Thu Jan 01 00:00:00 1970 +0000
532 date: Thu Jan 01 00:00:00 1970 +0000
533 summary: 0.0
533 summary: 0.0
534
534
535 changeset: 1:34c2bf6b0626
535 changeset: 1:34c2bf6b0626
536 user: test
536 user: test
537 date: Thu Jan 01 00:00:00 1970 +0000
537 date: Thu Jan 01 00:00:00 1970 +0000
538 summary: 0.1
538 summary: 0.1
539
539
540 changeset: 2:e38ba6f5b7e0
540 changeset: 2:e38ba6f5b7e0
541 user: test
541 user: test
542 date: Thu Jan 01 00:00:00 1970 +0000
542 date: Thu Jan 01 00:00:00 1970 +0000
543 summary: 0.2
543 summary: 0.2
544
544
545 changeset: 3:eebf5a27f8ca
545 changeset: 3:eebf5a27f8ca
546 user: test
546 user: test
547 date: Thu Jan 01 00:00:00 1970 +0000
547 date: Thu Jan 01 00:00:00 1970 +0000
548 summary: 0.3
548 summary: 0.3
549
549
550 $ cd ..
550 $ cd ..
551
551
552 test for 540d1059c802
552 test for 540d1059c802
553
553
554 test for 540d1059c802
554 test for 540d1059c802
555
555
556 $ hg init orig
556 $ hg init orig
557 $ cd orig
557 $ cd orig
558 $ echo foo > foo
558 $ echo foo > foo
559 $ hg add foo
559 $ hg add foo
560 $ hg ci -m 'add foo'
560 $ hg ci -m 'add foo'
561
561
562 $ hg clone . ../copy
562 $ hg clone . ../copy
563 updating to branch default
563 updating to branch default
564 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
564 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
565 $ hg tag foo
565 $ hg tag foo
566
566
567 $ cd ../copy
567 $ cd ../copy
568 $ echo >> foo
568 $ echo >> foo
569 $ hg ci -m 'change foo'
569 $ hg ci -m 'change foo'
570 $ hg bundle ../bundle.hg ../orig
570 $ hg bundle ../bundle.hg ../orig
571 searching for changes
571 searching for changes
572 1 changesets found
572 1 changesets found
573
573
574 $ cd ../orig
574 $ cd ../orig
575 $ hg incoming ../bundle.hg
575 $ hg incoming ../bundle.hg
576 comparing with ../bundle.hg
576 comparing with ../bundle.hg
577 searching for changes
577 searching for changes
578 changeset: 2:ed1b79f46b9a
578 changeset: 2:ed1b79f46b9a
579 tag: tip
579 tag: tip
580 parent: 0:bbd179dfa0a7
580 parent: 0:bbd179dfa0a7
581 user: test
581 user: test
582 date: Thu Jan 01 00:00:00 1970 +0000
582 date: Thu Jan 01 00:00:00 1970 +0000
583 summary: change foo
583 summary: change foo
584
584
585 $ cd ..
585 $ cd ..
586
586
587 test bundle with # in the filename (issue2154):
587 test bundle with # in the filename (issue2154):
588
588
589 $ cp bundle.hg 'test#bundle.hg'
589 $ cp bundle.hg 'test#bundle.hg'
590 $ cd orig
590 $ cd orig
591 $ hg incoming '../test#bundle.hg'
591 $ hg incoming '../test#bundle.hg'
592 comparing with ../test
592 comparing with ../test
593 abort: unknown revision 'bundle.hg'!
593 abort: unknown revision 'bundle.hg'!
594 [255]
594 [255]
595
595
596 note that percent encoding is not handled:
596 note that percent encoding is not handled:
597
597
598 $ hg incoming ../test%23bundle.hg
598 $ hg incoming ../test%23bundle.hg
599 abort: repository ../test%23bundle.hg not found!
599 abort: repository ../test%23bundle.hg not found!
600 [255]
600 [255]
601 $ cd ..
601 $ cd ..
602
602
603 test to bundle revisions on the newly created branch (issue3828):
603 test to bundle revisions on the newly created branch (issue3828):
604
604
605 $ hg -q clone -U test test-clone
605 $ hg -q clone -U test test-clone
606 $ cd test
606 $ cd test
607
607
608 $ hg -q branch foo
608 $ hg -q branch foo
609 $ hg commit -m "create foo branch"
609 $ hg commit -m "create foo branch"
610 $ hg -q outgoing ../test-clone
610 $ hg -q outgoing ../test-clone
611 9:b4f5acb1ee27
611 9:b4f5acb1ee27
612 $ hg -q bundle --branch foo foo.hg ../test-clone
612 $ hg -q bundle --branch foo foo.hg ../test-clone
613 $ hg -R foo.hg -q log -r "bundle()"
613 $ hg -R foo.hg -q log -r "bundle()"
614 9:b4f5acb1ee27
614 9:b4f5acb1ee27
615
615
616 $ cd ..
616 $ cd ..
617
617
618 test for https://bz.mercurial-scm.org/1144
618 test for https://bz.mercurial-scm.org/1144
619
619
620 test that verify bundle does not traceback
620 test that verify bundle does not traceback
621
621
622 partial history bundle, fails w/ unknown parent
622 partial history bundle, fails w/ unknown parent
623
623
624 $ hg -R bundle.hg verify
624 $ hg -R bundle.hg verify
625 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
625 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
626 [255]
626 [255]
627
627
628 full history bundle, refuses to verify non-local repo
628 full history bundle, refuses to verify non-local repo
629
629
630 $ hg -R all.hg verify
630 $ hg -R all.hg verify
631 abort: cannot verify bundle or remote repos
631 abort: cannot verify bundle or remote repos
632 [255]
632 [255]
633
633
634 but, regular verify must continue to work
634 but, regular verify must continue to work
635
635
636 $ hg -R orig verify
636 $ hg -R orig verify
637 checking changesets
637 checking changesets
638 checking manifests
638 checking manifests
639 crosschecking files in changesets and manifests
639 crosschecking files in changesets and manifests
640 checking files
640 checking files
641 2 files, 2 changesets, 2 total revisions
641 2 files, 2 changesets, 2 total revisions
642
642
643 diff against bundle
643 diff against bundle
644
644
645 $ hg init b
645 $ hg init b
646 $ cd b
646 $ cd b
647 $ hg -R ../all.hg diff -r tip
647 $ hg -R ../all.hg diff -r tip
648 diff -r aa35859c02ea anotherfile
648 diff -r aa35859c02ea anotherfile
649 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
649 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
650 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
650 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
651 @@ -1,4 +0,0 @@
651 @@ -1,4 +0,0 @@
652 -0
652 -0
653 -1
653 -1
654 -2
654 -2
655 -3
655 -3
656 $ cd ..
656 $ cd ..
657
657
658 bundle single branch
658 bundle single branch
659
659
660 $ hg init branchy
660 $ hg init branchy
661 $ cd branchy
661 $ cd branchy
662 $ echo a >a
662 $ echo a >a
663 $ echo x >x
663 $ echo x >x
664 $ hg ci -Ama
664 $ hg ci -Ama
665 adding a
665 adding a
666 adding x
666 adding x
667 $ echo c >c
667 $ echo c >c
668 $ echo xx >x
668 $ echo xx >x
669 $ hg ci -Amc
669 $ hg ci -Amc
670 adding c
670 adding c
671 $ echo c1 >c1
671 $ echo c1 >c1
672 $ hg ci -Amc1
672 $ hg ci -Amc1
673 adding c1
673 adding c1
674 $ hg up 0
674 $ hg up 0
675 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
675 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
676 $ echo b >b
676 $ echo b >b
677 $ hg ci -Amb
677 $ hg ci -Amb
678 adding b
678 adding b
679 created new head
679 created new head
680 $ echo b1 >b1
680 $ echo b1 >b1
681 $ echo xx >x
681 $ echo xx >x
682 $ hg ci -Amb1
682 $ hg ci -Amb1
683 adding b1
683 adding b1
684 $ hg clone -q -r2 . part
684 $ hg clone -q -r2 . part
685
685
686 == bundling via incoming
686 == bundling via incoming
687
687
688 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
688 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
689 comparing with .
689 comparing with .
690 searching for changes
690 searching for changes
691 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
691 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
692 057f4db07f61970e1c11e83be79e9d08adc4dc31
692 057f4db07f61970e1c11e83be79e9d08adc4dc31
693
693
694 == bundling
694 == bundling
695
695
696 $ hg bundle bundle.hg part --debug --config progress.debug=true
696 $ hg bundle bundle.hg part --debug --config progress.debug=true
697 query 1; heads
697 query 1; heads
698 searching for changes
698 searching for changes
699 all remote heads known locally
699 all remote heads known locally
700 2 changesets found
700 2 changesets found
701 list of changesets:
701 list of changesets:
702 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
702 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
703 057f4db07f61970e1c11e83be79e9d08adc4dc31
703 057f4db07f61970e1c11e83be79e9d08adc4dc31
704 bundle2-output-bundle: "HG20", (1 params) 1 parts total
704 bundle2-output-bundle: "HG20", (1 params) 1 parts total
705 bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
705 bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
706 bundling: 1/2 changesets (50.00%)
706 bundling: 1/2 changesets (50.00%)
707 bundling: 2/2 changesets (100.00%)
707 bundling: 2/2 changesets (100.00%)
708 bundling: 1/2 manifests (50.00%)
708 bundling: 1/2 manifests (50.00%)
709 bundling: 2/2 manifests (100.00%)
709 bundling: 2/2 manifests (100.00%)
710 bundling: b 1/3 files (33.33%)
710 bundling: b 1/3 files (33.33%)
711 bundling: b1 2/3 files (66.67%)
711 bundling: b1 2/3 files (66.67%)
712 bundling: x 3/3 files (100.00%)
712 bundling: x 3/3 files (100.00%)
713
713
714 == Test for issue3441
714 == Test for issue3441
715
715
716 $ hg clone -q -r0 . part2
716 $ hg clone -q -r0 . part2
717 $ hg -q -R part2 pull bundle.hg
717 $ hg -q -R part2 pull bundle.hg
718 $ hg -R part2 verify
718 $ hg -R part2 verify
719 checking changesets
719 checking changesets
720 checking manifests
720 checking manifests
721 crosschecking files in changesets and manifests
721 crosschecking files in changesets and manifests
722 checking files
722 checking files
723 4 files, 3 changesets, 5 total revisions
723 4 files, 3 changesets, 5 total revisions
724
724
725 $ cd ..
725 $ cd ..
@@ -1,160 +1,160
1 Check whether size of generaldelta revlog is not bigger than its
1 Check whether size of generaldelta revlog is not bigger than its
2 regular equivalent. Test would fail if generaldelta was naive
2 regular equivalent. Test would fail if generaldelta was naive
3 implementation of parentdelta: third manifest revision would be fully
3 implementation of parentdelta: third manifest revision would be fully
4 inserted due to big distance from its paren revision (zero).
4 inserted due to big distance from its paren revision (zero).
5
5
6 $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
6 $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
7 $ cd repo
7 $ cd repo
8 $ echo foo > foo
8 $ echo foo > foo
9 $ echo bar > bar
9 $ echo bar > bar
10 $ echo baz > baz
10 $ echo baz > baz
11 $ hg commit -q -Am boo
11 $ hg commit -q -Am boo
12 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
12 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
13 $ for r in 1 2 3; do
13 $ for r in 1 2 3; do
14 > echo $r > foo
14 > echo $r > foo
15 > hg commit -q -m $r
15 > hg commit -q -m $r
16 > hg up -q -r 0
16 > hg up -q -r 0
17 > hg pull . -q -r $r -R ../gdrepo
17 > hg pull . -q -r $r -R ../gdrepo
18 > done
18 > done
19
19
20 $ cd ..
20 $ cd ..
21 >>> import os
21 >>> import os
22 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
22 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
23 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
23 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
24 >>> if regsize < gdsize:
24 >>> if regsize < gdsize:
25 ... print 'generaldata increased size of manifest'
25 ... print 'generaldata increased size of manifest'
26
26
27 Verify rev reordering doesnt create invalid bundles (issue4462)
27 Verify rev reordering doesnt create invalid bundles (issue4462)
28 This requires a commit tree that when pulled will reorder manifest revs such
28 This requires a commit tree that when pulled will reorder manifest revs such
29 that the second manifest to create a file rev will be ordered before the first
29 that the second manifest to create a file rev will be ordered before the first
30 manifest to create that file rev. We also need to do a partial pull to ensure
30 manifest to create that file rev. We also need to do a partial pull to ensure
31 reordering happens. At the end we verify the linkrev points at the earliest
31 reordering happens. At the end we verify the linkrev points at the earliest
32 commit.
32 commit.
33
33
34 $ hg init server --config format.generaldelta=True
34 $ hg init server --config format.generaldelta=True
35 $ cd server
35 $ cd server
36 $ touch a
36 $ touch a
37 $ hg commit -Aqm a
37 $ hg commit -Aqm a
38 $ echo x > x
38 $ echo x > x
39 $ echo y > y
39 $ echo y > y
40 $ hg commit -Aqm xy
40 $ hg commit -Aqm xy
41 $ hg up -q '.^'
41 $ hg up -q '.^'
42 $ echo x > x
42 $ echo x > x
43 $ echo z > z
43 $ echo z > z
44 $ hg commit -Aqm xz
44 $ hg commit -Aqm xz
45 $ hg up -q 1
45 $ hg up -q 1
46 $ echo b > b
46 $ echo b > b
47 $ hg commit -Aqm b
47 $ hg commit -Aqm b
48 $ hg merge -q 2
48 $ hg merge -q 2
49 $ hg commit -Aqm merge
49 $ hg commit -Aqm merge
50 $ echo c > c
50 $ echo c > c
51 $ hg commit -Aqm c
51 $ hg commit -Aqm c
52 $ hg log -G -T '{rev} {shortest(node)} {desc}'
52 $ hg log -G -T '{rev} {shortest(node)} {desc}'
53 @ 5 ebb8 c
53 @ 5 ebb8 c
54 |
54 |
55 o 4 baf7 merge
55 o 4 baf7 merge
56 |\
56 |\
57 | o 3 a129 b
57 | o 3 a129 b
58 | |
58 | |
59 o | 2 958c xz
59 o | 2 958c xz
60 | |
60 | |
61 | o 1 f00c xy
61 | o 1 f00c xy
62 |/
62 |/
63 o 0 3903 a
63 o 0 3903 a
64
64
65 $ cd ..
65 $ cd ..
66 $ hg init client --config format.generaldelta=false --config format.usegeneraldelta=false
66 $ hg init client --config format.generaldelta=false --config format.usegeneraldelta=false
67 $ cd client
67 $ cd client
68 $ hg pull -q ../server -r 4
68 $ hg pull -q ../server -r 4
69 $ hg debugindex x
69 $ hg debugindex x
70 rev offset length base linkrev nodeid p1 p2
70 rev offset length base linkrev nodeid p1 p2
71 0 0 3 0 1 1406e7411862 000000000000 000000000000
71 0 0 3 0 1 1406e7411862 000000000000 000000000000
72
72
73 $ cd ..
73 $ cd ..
74
74
75 Test "usegeneraldelta" config
75 Test "usegeneraldelta" config
76 (repo are general delta, but incoming bundle are not re-deltified)
76 (repo are general delta, but incoming bundle are not re-deltified)
77
77
78 delta coming from the server base delta server are not recompressed.
78 delta coming from the server base delta server are not recompressed.
79 (also include the aggressive version for comparison)
79 (also include the aggressive version for comparison)
80
80
81 $ hg clone repo --pull --config format.usegeneraldelta=1 usegd
81 $ hg clone repo --pull --config format.usegeneraldelta=1 usegd
82 requesting all changes
82 requesting all changes
83 adding changesets
83 adding changesets
84 adding manifests
84 adding manifests
85 adding file changes
85 adding file changes
86 added 4 changesets with 6 changes to 3 files (+2 heads)
86 added 4 changesets with 6 changes to 3 files (+2 heads)
87 updating to branch default
87 updating to branch default
88 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 $ hg clone repo --pull --config format.generaldelta=1 full
89 $ hg clone repo --pull --config format.generaldelta=1 full
90 requesting all changes
90 requesting all changes
91 adding changesets
91 adding changesets
92 adding manifests
92 adding manifests
93 adding file changes
93 adding file changes
94 added 4 changesets with 6 changes to 3 files (+2 heads)
94 added 4 changesets with 6 changes to 3 files (+2 heads)
95 updating to branch default
95 updating to branch default
96 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
96 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
97 $ hg -R repo debugindex -m
97 $ hg -R repo debugindex -m
98 rev offset length base linkrev nodeid p1 p2
98 rev offset length base linkrev nodeid p1 p2
99 0 0 104 0 0 cef96823c800 000000000000 000000000000
99 0 0 104 0 0 cef96823c800 000000000000 000000000000
100 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
100 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
101 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
101 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
102 3 218 104 3 3 723508934dad cef96823c800 000000000000
102 3 218 104 3 3 723508934dad cef96823c800 000000000000
103 $ hg -R usegd debugindex -m
103 $ hg -R usegd debugindex -m
104 rev offset length delta linkrev nodeid p1 p2
104 rev offset length delta linkrev nodeid p1 p2
105 0 0 104 -1 0 cef96823c800 000000000000 000000000000
105 0 0 104 -1 0 cef96823c800 000000000000 000000000000
106 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
106 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
107 2 161 57 1 2 134fdc6fd680 cef96823c800 000000000000
107 2 161 57 1 2 134fdc6fd680 cef96823c800 000000000000
108 3 218 104 -1 3 723508934dad cef96823c800 000000000000
108 3 218 57 0 3 723508934dad cef96823c800 000000000000
109 $ hg -R full debugindex -m
109 $ hg -R full debugindex -m
110 rev offset length delta linkrev nodeid p1 p2
110 rev offset length delta linkrev nodeid p1 p2
111 0 0 104 -1 0 cef96823c800 000000000000 000000000000
111 0 0 104 -1 0 cef96823c800 000000000000 000000000000
112 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
112 1 104 57 0 1 58ab9a8d541d cef96823c800 000000000000
113 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
113 2 161 57 0 2 134fdc6fd680 cef96823c800 000000000000
114 3 218 57 0 3 723508934dad cef96823c800 000000000000
114 3 218 57 0 3 723508934dad cef96823c800 000000000000
115
115
116 Test format.aggressivemergedeltas
116 Test format.aggressivemergedeltas
117
117
118 $ hg init --config format.generaldelta=1 aggressive
118 $ hg init --config format.generaldelta=1 aggressive
119 $ cd aggressive
119 $ cd aggressive
120 $ cat << EOF >> .hg/hgrc
120 $ cat << EOF >> .hg/hgrc
121 > [format]
121 > [format]
122 > generaldelta = 1
122 > generaldelta = 1
123 > EOF
123 > EOF
124 $ touch a b c d e
124 $ touch a b c d e
125 $ hg commit -Aqm side1
125 $ hg commit -Aqm side1
126 $ hg up -q null
126 $ hg up -q null
127 $ touch x y
127 $ touch x y
128 $ hg commit -Aqm side2
128 $ hg commit -Aqm side2
129
129
130 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
130 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
131 $ hg merge -q 0
131 $ hg merge -q 0
132 $ hg commit -q -m merge
132 $ hg commit -q -m merge
133 $ hg debugindex -m
133 $ hg debugindex -m
134 rev offset length delta linkrev nodeid p1 p2
134 rev offset length delta linkrev nodeid p1 p2
135 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
135 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
136 1 59 59 -1 1 315c023f341d 000000000000 000000000000
136 1 59 59 -1 1 315c023f341d 000000000000 000000000000
137 2 118 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
137 2 118 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
138
138
139 $ hg strip -q -r . --config extensions.strip=
139 $ hg strip -q -r . --config extensions.strip=
140
140
141 - Verify aggressive merge uses p2 (commit 0) as delta parent
141 - Verify aggressive merge uses p2 (commit 0) as delta parent
142 $ hg up -q -C 1
142 $ hg up -q -C 1
143 $ hg merge -q 0
143 $ hg merge -q 0
144 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
144 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
145 $ hg debugindex -m
145 $ hg debugindex -m
146 rev offset length delta linkrev nodeid p1 p2
146 rev offset length delta linkrev nodeid p1 p2
147 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
147 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
148 1 59 59 -1 1 315c023f341d 000000000000 000000000000
148 1 59 59 -1 1 315c023f341d 000000000000 000000000000
149 2 118 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
149 2 118 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
150
150
151 Test that strip bundle use bundle2
151 Test that strip bundle use bundle2
152 $ hg --config extensions.strip= strip .
152 $ hg --config extensions.strip= strip .
153 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
153 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
154 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob)
154 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob)
155 $ hg debugbundle .hg/strip-backup/*
155 $ hg debugbundle .hg/strip-backup/*
156 Stream params: {'Compression': 'BZ'}
156 Stream params: {'Compression': 'BZ'}
157 changegroup -- "{'version': '02'}"
157 changegroup -- "{'version': '02'}"
158 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
158 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
159
159
160 $ cd ..
160 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now