revlog: store fulltext when compressed delta is bigger than it...
Siddharth Agarwal
r23285:6cc1f388 default
@@ -1,1489 +1,1489 b''
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev
from i18n import _
import ancestor, mdiff, parsers, error, util, templatefilters
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError
CensoredNodeError = error.CensoredNodeError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)
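
# A quick sketch of how these helpers compose (values are illustrative):
# an index entry's first field packs a 48-bit data-file offset with 16
# bits of per-revision flags, so
#   offset_type(1024, 0) -> 0x4000000   (1024 << 16)
#   getoffset(0x4000000) -> 1024
#   gettype(0x4000000)   -> 0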

_nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()
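
# Rough sketch of the node hash above (illustrative, both branches agree):
# a node is the sha1 of the two parent nodes, lowest first, followed by
# the revision text, so identical text with different history still gets
# a distinct node:
#   hash(text, p1, p2) == sha1(min(p1, p2) + max(p1, p2) + text).digest()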

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        try:
            return _decompress(bin)
        except zlib.error, e:
            raise RevlogError(_("revlog decompress error: %s") % str(e))
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)
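
# Tag convention consumed above (a summary of the code paths, not new
# behavior):
#   '\0' - chunk stored literally (a NUL first byte cannot be zlib output)
#   'x'  - a zlib stream ('x' is zlib's usual first byte with defaults)
#   'u'  - stored uncompressed; the marker is stripped before returning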

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56
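
# Size check for the v0 entry layout (illustrative):
#   struct.calcsize(">4l20s20s20s") == 4*4 + 3*20 == 76
# and the nodeid starts at byte 56 (16 bytes of ints plus two 20-byte
# parent nodeids), which is what v0shaoffset records.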

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"
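
# Entry-size sketch for the NG layout (illustrative):
#   struct.calcsize(">Qiiiiii20s12x") == 8 + 6*4 + 20 + 12 == 64
# The nodeid is a 20-byte sha1 zero-padded to 32 bytes on disk, and it
# starts at byte 32 (8 + 6*4), which is what ngshaoffset records.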

class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p
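
# Note on the rev == 0 special case above (a sketch of the rationale, not
# new behavior): a revlog has no separate header, so the first 4 bytes of
# entry 0 double as the version/flags field. That works because rev 0's
# packed offset is always 0, so those bytes carry no information:
#   entry0 = _pack(versionformat, version) + p[4:]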

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._basecache = None
        self._chunkcache = (0, '')
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']

        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)

        i = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
                self._initempty = False
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
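
    # Decoding the version field read above (values illustrative):
    #   v = 65537  ==  REVLOGNG | REVLOGNGINLINEDATA
    #   fmt   = v & 0xFFFF   -> 1      (the format, REVLOGNG)
    #   flags = v & ~0xFFFF  -> 65536  (inline data set, generaldelta not)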

    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)
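
    # Usage sketch for revs() (illustrative): in a 5-revision revlog,
    #   list(self.revs())                -> [0, 1, 2, 3, 4]
    #   list(self.revs(start=3))         -> [3, 4]
    #   list(self.revs(start=3, stop=1)) -> [3, 2, 1]  (inclusive, reversed)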

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def chainlen(self, rev):
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        while iterrev != e[3]:
            clen += 1
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        return clen
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize
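
    # Index entry layout assumed by the accessors above (a summary,
    # matching the "index ng" comment near the top of the file):
    #   e[0] offset << 16 | flags      e[4] link rev
    #   e[1] compressed length         e[5] parent 1 rev
    #   e[2] uncompressed length       e[6] parent 2 rev
    #   e[3] delta base rev            e[7] nodeid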

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = util.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        return ancestor.missingancestors(heads, common, self.parentrevs)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        return [self.node(r) for r in
                ancestor.missingancestors(heads, common, self.parentrevs)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered rev so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]
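
    # Sketch of the ishead trick above (illustrative): for a linear
    # history 0 <- 1 <- 2, each rev is first marked 1, then each rev
    # clears its parents' slots, leaving only the tip set:
    #   ishead -> [0, 0, 1, 0]  =>  heads == [2]
    # (the extra trailing slot absorbs nullrev parents via index -1)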

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                    heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants([start]):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return map(self.node, ancs)

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        try:
            n = self.index.partialmatch(id)
            if n and self.hasnode(n):
                return n
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fall through to slow path that filters hidden revisions
            pass
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        df.close()
        self._addchunk(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d
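
    # Window math sketch for _loadchunk (illustrative, cachesize = 65536):
    #   offset = 70000, length = 100
    #   realoffset = 70000 & ~65535                      -> 65536
    #   reallength = ((70100 + 65536) & ~65535) - 65536  -> 65536
    # so the read covers [65536, 131072) and the caller gets a buffer
    # view of just the 100 requested bytes inside it.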

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._loadchunk(offset, length)

    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        end = self.end(endrev)
        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start
        return self._getchunk(start, length)
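
    # Inline-offset sketch for _chunkraw (illustrative): with inline data
    # the .i file interleaves a 64-byte index entry before each chunk, so
    # rev r's data really begins at start(r) + (r + 1) * 64; the "+ 1"
    # accounts for rev r's own entry preceding its data.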
928
928
929 def _chunk(self, rev):
929 def _chunk(self, rev):
930 return decompress(self._chunkraw(rev, rev))
930 return decompress(self._chunkraw(rev, rev))
931
931
932 def _chunks(self, revs):
932 def _chunks(self, revs):
933 '''faster version of [self._chunk(rev) for rev in revs]
933 '''faster version of [self._chunk(rev) for rev in revs]
934
934
935 Assumes that revs is in ascending order.'''
935 Assumes that revs is in ascending order.'''
936 if not revs:
936 if not revs:
937 return []
937 return []
938 start = self.start
938 start = self.start
939 length = self.length
939 length = self.length
940 inline = self._inline
940 inline = self._inline
941 iosize = self._io.size
941 iosize = self._io.size
942 buffer = util.buffer
942 buffer = util.buffer
943
943
944 l = []
944 l = []
945 ladd = l.append
945 ladd = l.append
946
946
947 # preload the cache
947 # preload the cache
948 try:
948 try:
949 while True:
949 while True:
950 # ensure that the cache doesn't change out from under us
950 # ensure that the cache doesn't change out from under us
951 _cache = self._chunkcache
951 _cache = self._chunkcache
952 self._chunkraw(revs[0], revs[-1])
952 self._chunkraw(revs[0], revs[-1])
953 if _cache == self._chunkcache:
953 if _cache == self._chunkcache:
954 break
954 break
955 offset, data = _cache
955 offset, data = _cache
956 except OverflowError:
956 except OverflowError:
957 # issue4215 - we can't cache a run of chunks greater than
957 # issue4215 - we can't cache a run of chunks greater than
958 # 2G on Windows
958 # 2G on Windows
959 return [self._chunk(rev) for rev in revs]
959 return [self._chunk(rev) for rev in revs]
960
960
961 for rev in revs:
961 for rev in revs:
962 chunkstart = start(rev)
962 chunkstart = start(rev)
963 if inline:
963 if inline:
964 chunkstart += (rev + 1) * iosize
964 chunkstart += (rev + 1) * iosize
965 chunklength = length(rev)
965 chunklength = length(rev)
966 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
966 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
967
967
968 return l
968 return l
969
969
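The preload loop in _chunks keeps re-reading until the chunk cache is stable, because a _chunkraw call can evict and replace the cached window while filling it. The same retry-until-stable pattern in isolation (all names here are illustrative):

# Editorial sketch of the retry loop at the top of _chunks.
def stable_read(get_cache, load):
    while True:
        snapshot = get_cache()      # grab the cache before loading
        load()                      # may or may not replace the cache
        if snapshot == get_cache():
            return snapshot         # nothing moved; snapshot is usable

state = {'cache': None}
def get_cache():
    return state['cache']
def load():
    if state['cache'] is None:      # first call populates the cache
        state['cache'] = (0, 'data')

assert stable_read(get_cache, load) == (0, 'data')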
970 def _chunkclear(self):
970 def _chunkclear(self):
971 self._chunkcache = (0, '')
971 self._chunkcache = (0, '')
972
972
973 def deltaparent(self, rev):
973 def deltaparent(self, rev):
974 """return deltaparent of the given revision"""
974 """return deltaparent of the given revision"""
975 base = self.index[rev][3]
975 base = self.index[rev][3]
976 if base == rev:
976 if base == rev:
977 return nullrev
977 return nullrev
978 elif self._generaldelta:
978 elif self._generaldelta:
979 return base
979 return base
980 else:
980 else:
981 return rev - 1
981 return rev - 1
982
982
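deltaparent covers two storage layouts: with general delta the base field recorded in the index is the delta parent itself, while the classic layout always deltas against the immediately preceding revision. A toy model, where an index entry is reduced to its base field:

# Toy model of deltaparent(); index entries hold only the base field.
nullrev = -1

def deltaparent(index, rev, generaldelta):
    base = index[rev]
    if base == rev:
        return nullrev     # rev stores a full text, no delta parent
    elif generaldelta:
        return base        # delta is against the recorded base
    else:
        return rev - 1     # classic layout: delta against rev - 1

index = [0, 0, 2, 1]       # rev -> base
assert deltaparent(index, 0, True) == nullrev
assert deltaparent(index, 3, True) == 1
assert deltaparent(index, 3, False) == 2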
983 def revdiff(self, rev1, rev2):
983 def revdiff(self, rev1, rev2):
984 """return or calculate a delta between two revisions"""
984 """return or calculate a delta between two revisions"""
985 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
985 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
986 return str(self._chunk(rev2))
986 return str(self._chunk(rev2))
987
987
988 return mdiff.textdiff(self.revision(rev1),
988 return mdiff.textdiff(self.revision(rev1),
989 self.revision(rev2))
989 self.revision(rev2))
990
990
991 def revision(self, nodeorrev):
991 def revision(self, nodeorrev):
992 """return an uncompressed revision of a given node or revision
992 """return an uncompressed revision of a given node or revision
993 number.
993 number.
994 """
994 """
995 if isinstance(nodeorrev, int):
995 if isinstance(nodeorrev, int):
996 rev = nodeorrev
996 rev = nodeorrev
997 node = self.node(rev)
997 node = self.node(rev)
998 else:
998 else:
999 node = nodeorrev
999 node = nodeorrev
1000 rev = None
1000 rev = None
1001
1001
1002 _cache = self._cache # grab local copy of cache to avoid thread race
1002 _cache = self._cache # grab local copy of cache to avoid thread race
1003 cachedrev = None
1003 cachedrev = None
1004 if node == nullid:
1004 if node == nullid:
1005 return ""
1005 return ""
1006 if _cache:
1006 if _cache:
1007 if _cache[0] == node:
1007 if _cache[0] == node:
1008 return _cache[2]
1008 return _cache[2]
1009 cachedrev = _cache[1]
1009 cachedrev = _cache[1]
1010
1010
1011 # look up what we need to read
1011 # look up what we need to read
1012 text = None
1012 text = None
1013 if rev is None:
1013 if rev is None:
1014 rev = self.rev(node)
1014 rev = self.rev(node)
1015
1015
1016 # check rev flags
1016 # check rev flags
1017 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1017 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1018 raise RevlogError(_('incompatible revision flag %x') %
1018 raise RevlogError(_('incompatible revision flag %x') %
1019 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1019 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1020
1020
1021 # build delta chain
1021 # build delta chain
1022 chain = []
1022 chain = []
1023 index = self.index # for performance
1023 index = self.index # for performance
1024 generaldelta = self._generaldelta
1024 generaldelta = self._generaldelta
1025 iterrev = rev
1025 iterrev = rev
1026 e = index[iterrev]
1026 e = index[iterrev]
1027 while iterrev != e[3] and iterrev != cachedrev:
1027 while iterrev != e[3] and iterrev != cachedrev:
1028 chain.append(iterrev)
1028 chain.append(iterrev)
1029 if generaldelta:
1029 if generaldelta:
1030 iterrev = e[3]
1030 iterrev = e[3]
1031 else:
1031 else:
1032 iterrev -= 1
1032 iterrev -= 1
1033 e = index[iterrev]
1033 e = index[iterrev]
1034
1034
1035 if iterrev == cachedrev:
1035 if iterrev == cachedrev:
1036 # cache hit
1036 # cache hit
1037 text = _cache[2]
1037 text = _cache[2]
1038 else:
1038 else:
1039 chain.append(iterrev)
1039 chain.append(iterrev)
1040 chain.reverse()
1040 chain.reverse()
1041
1041
1042 # drop cache to save memory
1042 # drop cache to save memory
1043 self._cache = None
1043 self._cache = None
1044
1044
1045 bins = self._chunks(chain)
1045 bins = self._chunks(chain)
1046 if text is None:
1046 if text is None:
1047 text = str(bins[0])
1047 text = str(bins[0])
1048 bins = bins[1:]
1048 bins = bins[1:]
1049
1049
1050 text = mdiff.patches(text, bins)
1050 text = mdiff.patches(text, bins)
1051
1051
1052 text = self._checkhash(text, node, rev)
1052 text = self._checkhash(text, node, rev)
1053
1053
1054 self._cache = (node, rev, text)
1054 self._cache = (node, rev, text)
1055 return text
1055 return text
1056
1056
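Stripped of caching and integrity checks, revision() is: walk back from the requested rev to a full text, then apply the intervening deltas oldest-first. A compact model of that reconstruction; mdiff.patches is replaced by a trivial append so the sketch stays self-contained:

# Editorial model of the delta-chain walk in revision(). Real revlogs
# apply binary patches via mdiff.patches; here a "delta" just appends.
def rebuild(store, rev):
    # store: rev -> (base, payload); base == rev marks a full text
    chain = []
    iterrev = rev
    while store[iterrev][0] != iterrev:
        chain.append(iterrev)
        iterrev = store[iterrev][0]
    chain.append(iterrev)
    chain.reverse()
    text = store[chain[0]][1]        # full text at the chain base
    for r in chain[1:]:
        text += store[r][1]          # stand-in for patching one delta
    return text

store = {0: (0, 'base'), 1: (0, '+one'), 2: (1, '+two')}
assert rebuild(store, 2) == 'base+one+two'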
1057 def hash(self, text, p1, p2):
1057 def hash(self, text, p1, p2):
1058 """Compute a node hash.
1058 """Compute a node hash.
1059
1059
1060 Available as a function so that subclasses can replace the hash
1060 Available as a function so that subclasses can replace the hash
1061 as needed.
1061 as needed.
1062 """
1062 """
1063 return hash(text, p1, p2)
1063 return hash(text, p1, p2)
1064
1064
1065 def _checkhash(self, text, node, rev):
1065 def _checkhash(self, text, node, rev):
1066 p1, p2 = self.parents(node)
1066 p1, p2 = self.parents(node)
1067 self.checkhash(text, p1, p2, node, rev)
1067 self.checkhash(text, p1, p2, node, rev)
1068 return text
1068 return text
1069
1069
1070 def checkhash(self, text, p1, p2, node, rev=None):
1070 def checkhash(self, text, p1, p2, node, rev=None):
1071 if node != self.hash(text, p1, p2):
1071 if node != self.hash(text, p1, p2):
1072 revornode = rev
1072 revornode = rev
1073 if revornode is None:
1073 if revornode is None:
1074 revornode = templatefilters.short(hex(node))
1074 revornode = templatefilters.short(hex(node))
1075 raise RevlogError(_("integrity check failed on %s:%s")
1075 raise RevlogError(_("integrity check failed on %s:%s")
1076 % (self.indexfile, revornode))
1076 % (self.indexfile, revornode))
1077
1077
1078 def checkinlinesize(self, tr, fp=None):
1078 def checkinlinesize(self, tr, fp=None):
1079 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1079 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1080 return
1080 return
1081
1081
1082 trinfo = tr.find(self.indexfile)
1082 trinfo = tr.find(self.indexfile)
1083 if trinfo is None:
1083 if trinfo is None:
1084 raise RevlogError(_("%s not found in the transaction")
1084 raise RevlogError(_("%s not found in the transaction")
1085 % self.indexfile)
1085 % self.indexfile)
1086
1086
1087 trindex = trinfo[2]
1087 trindex = trinfo[2]
1088 dataoff = self.start(trindex)
1088 dataoff = self.start(trindex)
1089
1089
1090 tr.add(self.datafile, dataoff)
1090 tr.add(self.datafile, dataoff)
1091
1091
1092 if fp:
1092 if fp:
1093 fp.flush()
1093 fp.flush()
1094 fp.close()
1094 fp.close()
1095
1095
1096 df = self.opener(self.datafile, 'w')
1096 df = self.opener(self.datafile, 'w')
1097 try:
1097 try:
1098 for r in self:
1098 for r in self:
1099 df.write(self._chunkraw(r, r))
1099 df.write(self._chunkraw(r, r))
1100 finally:
1100 finally:
1101 df.close()
1101 df.close()
1102
1102
1103 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1103 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1104 self.version &= ~(REVLOGNGINLINEDATA)
1104 self.version &= ~(REVLOGNGINLINEDATA)
1105 self._inline = False
1105 self._inline = False
1106 for i in self:
1106 for i in self:
1107 e = self._io.packentry(self.index[i], self.node, self.version, i)
1107 e = self._io.packentry(self.index[i], self.node, self.version, i)
1108 fp.write(e)
1108 fp.write(e)
1109
1109
1110 # if we don't call close, the temp file will never replace the
1110 # if we don't call close, the temp file will never replace the
1111 # real index
1111 # real index
1112 fp.close()
1112 fp.close()
1113
1113
1114 tr.replace(self.indexfile, trindex * self._io.size)
1114 tr.replace(self.indexfile, trindex * self._io.size)
1115 self._chunkclear()
1115 self._chunkclear()
1116
1116
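checkinlinesize migrates an inline revlog (data interleaved in the .i file) into a separate .d file once the data outgrows _maxinline (128 KiB); the guard uses the next-to-last entry so the decision is stable while the last entry is still being written. A toy version of just the trigger test:

# Toy form of the checkinlinesize guard; starts/lengths indexed per rev.
_MAXINLINE = 131072

def needs_split(inline, starts, lengths):
    if not inline:
        return False
    # start(-2) + length(-2): data size up to the next-to-last revision
    return starts[-2] + lengths[-2] >= _MAXINLINE

assert not needs_split(True, [0, 1000], [1000, 500])
assert needs_split(True, [0, 131100], [131100, 10])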
1117 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1117 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1118 node=None):
1118 node=None):
1119 """add a revision to the log
1119 """add a revision to the log
1120
1120
1121 text - the revision data to add
1121 text - the revision data to add
1122 transaction - the transaction object used for rollback
1122 transaction - the transaction object used for rollback
1123 link - the linkrev data to add
1123 link - the linkrev data to add
1124 p1, p2 - the parent nodeids of the revision
1124 p1, p2 - the parent nodeids of the revision
1125 cachedelta - an optional precomputed delta
1125 cachedelta - an optional precomputed delta
1126 node - nodeid of revision; typically node is not specified, and it is
1126 node - nodeid of revision; typically node is not specified, and it is
1127 computed by default as hash(text, p1, p2); however, subclasses might
1127 computed by default as hash(text, p1, p2); however, subclasses might
1128 use a different hashing method (and override checkhash() in that case)
1128 use a different hashing method (and override checkhash() in that case)
1129 """
1129 """
1130 if link == nullrev:
1130 if link == nullrev:
1131 raise RevlogError(_("attempted to add linkrev -1 to %s")
1131 raise RevlogError(_("attempted to add linkrev -1 to %s")
1132 % self.indexfile)
1132 % self.indexfile)
1133 node = node or self.hash(text, p1, p2)
1133 node = node or self.hash(text, p1, p2)
1134 if node in self.nodemap:
1134 if node in self.nodemap:
1135 return node
1135 return node
1136
1136
1137 dfh = None
1137 dfh = None
1138 if not self._inline:
1138 if not self._inline:
1139 dfh = self.opener(self.datafile, "a")
1139 dfh = self.opener(self.datafile, "a")
1140 ifh = self.opener(self.indexfile, "a+")
1140 ifh = self.opener(self.indexfile, "a+")
1141 try:
1141 try:
1142 return self._addrevision(node, text, transaction, link, p1, p2,
1142 return self._addrevision(node, text, transaction, link, p1, p2,
1143 cachedelta, ifh, dfh)
1143 cachedelta, ifh, dfh)
1144 finally:
1144 finally:
1145 if dfh:
1145 if dfh:
1146 dfh.close()
1146 dfh.close()
1147 ifh.close()
1147 ifh.close()
1148
1148
1149 def compress(self, text):
1149 def compress(self, text):
1150 """ generate a possibly-compressed representation of text """
1150 """ generate a possibly-compressed representation of text """
1151 if not text:
1151 if not text:
1152 return ("", text)
1152 return ("", text)
1153 l = len(text)
1153 l = len(text)
1154 bin = None
1154 bin = None
1155 if l < 44:
1155 if l < 44:
1156 pass
1156 pass
1157 elif l > 1000000:
1157 elif l > 1000000:
1158 # zlib makes an internal copy, thus doubling memory usage for
1158 # zlib makes an internal copy, thus doubling memory usage for
1159 # large files, so let's do this in pieces
1159 # large files, so let's do this in pieces
1160 z = zlib.compressobj()
1160 z = zlib.compressobj()
1161 p = []
1161 p = []
1162 pos = 0
1162 pos = 0
1163 while pos < l:
1163 while pos < l:
1164 pos2 = pos + 2**20
1164 pos2 = pos + 2**20
1165 p.append(z.compress(text[pos:pos2]))
1165 p.append(z.compress(text[pos:pos2]))
1166 pos = pos2
1166 pos = pos2
1167 p.append(z.flush())
1167 p.append(z.flush())
1168 if sum(map(len, p)) < l:
1168 if sum(map(len, p)) < l:
1169 bin = "".join(p)
1169 bin = "".join(p)
1170 else:
1170 else:
1171 bin = _compress(text)
1171 bin = _compress(text)
1172 if bin is None or len(bin) > l:
1172 if bin is None or len(bin) > l:
1173 if text[0] == '\0':
1173 if text[0] == '\0':
1174 return ("", text)
1174 return ("", text)
1175 return ('u', text)
1175 return ('u', text)
1176 return ("", bin)
1176 return ("", bin)
1177
1177
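compress() returns a (header, data) pair with three shapes: '' plus a zlib stream, '' plus raw text when the text starts with NUL (so it cannot be mistaken for a marker), or 'u' plus raw text when compression does not pay off. A hedged sketch of the matching read side, dispatching on the first byte the way the module's decompress() does:

import zlib

# Editorial sketch of the inverse of compress(): '\0' means raw text,
# 'x' is the leading byte of a default zlib stream, 'u' marks text
# stored uncompressed.
def decompress_sketch(bin):
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin                     # raw text beginning with NUL
    if t == 'x':
        return zlib.decompress(bin)    # default zlib streams start 'x'
    if t == 'u':
        return bin[1:]                 # 'u' marker: stored verbatim
    raise ValueError('unknown compression type %r' % t)

assert decompress_sketch('u' + 'hello') == 'hello'
assert decompress_sketch(zlib.compress('hello' * 20)) == 'hello' * 20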
1178 def _addrevision(self, node, text, transaction, link, p1, p2,
1178 def _addrevision(self, node, text, transaction, link, p1, p2,
1179 cachedelta, ifh, dfh):
1179 cachedelta, ifh, dfh):
1180 """internal function to add revisions to the log
1180 """internal function to add revisions to the log
1181
1181
1182 see addrevision for argument descriptions.
1182 see addrevision for argument descriptions.
1183 invariants:
1183 invariants:
1184 - text is optional (can be None); if not set, cachedelta must be set.
1184 - text is optional (can be None); if not set, cachedelta must be set.
1185 if both are set, they must correspond to each other.
1185 if both are set, they must correspond to each other.
1186 """
1186 """
1187 btext = [text]
1187 btext = [text]
1188 def buildtext():
1188 def buildtext():
1189 if btext[0] is not None:
1189 if btext[0] is not None:
1190 return btext[0]
1190 return btext[0]
1191 # flush any pending writes here so revision() can read the data back
1191 # flush any pending writes here so revision() can read the data back
1192 if dfh:
1192 if dfh:
1193 dfh.flush()
1193 dfh.flush()
1194 ifh.flush()
1194 ifh.flush()
1195 basetext = self.revision(self.node(cachedelta[0]))
1195 basetext = self.revision(self.node(cachedelta[0]))
1196 btext[0] = mdiff.patch(basetext, cachedelta[1])
1196 btext[0] = mdiff.patch(basetext, cachedelta[1])
1197 try:
1197 try:
1198 self.checkhash(btext[0], p1, p2, node)
1198 self.checkhash(btext[0], p1, p2, node)
1199 except CensoredNodeError:
1199 except CensoredNodeError:
1200 pass # always import a censor tombstone.
1200 pass # always import a censor tombstone.
1201 return btext[0]
1201 return btext[0]
1202
1202
1203 def builddelta(rev):
1203 def builddelta(rev):
1204 # can we use the cached delta?
1204 # can we use the cached delta?
1205 if cachedelta and cachedelta[0] == rev:
1205 if cachedelta and cachedelta[0] == rev:
1206 delta = cachedelta[1]
1206 delta = cachedelta[1]
1207 else:
1207 else:
1208 t = buildtext()
1208 t = buildtext()
1209 ptext = self.revision(self.node(rev))
1209 ptext = self.revision(self.node(rev))
1210 delta = mdiff.textdiff(ptext, t)
1210 delta = mdiff.textdiff(ptext, t)
1211 data = self.compress(delta)
1211 data = self.compress(delta)
1212 l = len(data[1]) + len(data[0])
1212 l = len(data[1]) + len(data[0])
1213 if basecache[0] == rev:
1213 if basecache[0] == rev:
1214 chainbase = basecache[1]
1214 chainbase = basecache[1]
1215 else:
1215 else:
1216 chainbase = self.chainbase(rev)
1216 chainbase = self.chainbase(rev)
1217 dist = l + offset - self.start(chainbase)
1217 dist = l + offset - self.start(chainbase)
1218 if self._generaldelta:
1218 if self._generaldelta:
1219 base = rev
1219 base = rev
1220 else:
1220 else:
1221 base = chainbase
1221 base = chainbase
1222 chainlen = self.chainlen(rev) + 1
1222 chainlen = self.chainlen(rev) + 1
1223 return dist, l, data, base, chainbase, chainlen
1223 return dist, l, data, base, chainbase, chainlen
1224
1224
1225 curr = len(self)
1225 curr = len(self)
1226 prev = curr - 1
1226 prev = curr - 1
1227 base = chainbase = curr
1227 base = chainbase = curr
1228 chainlen = None
1228 chainlen = None
1229 offset = self.end(prev)
1229 offset = self.end(prev)
1230 flags = 0
1230 flags = 0
1231 d = None
1231 d = None
1232 if self._basecache is None:
1232 if self._basecache is None:
1233 self._basecache = (prev, self.chainbase(prev))
1233 self._basecache = (prev, self.chainbase(prev))
1234 basecache = self._basecache
1234 basecache = self._basecache
1235 p1r, p2r = self.rev(p1), self.rev(p2)
1235 p1r, p2r = self.rev(p1), self.rev(p2)
1236
1236
1237 # should we try to build a delta?
1237 # should we try to build a delta?
1238 if prev != nullrev:
1238 if prev != nullrev:
1239 if self._generaldelta:
1239 if self._generaldelta:
1240 if p1r >= basecache[1]:
1240 if p1r >= basecache[1]:
1241 d = builddelta(p1r)
1241 d = builddelta(p1r)
1242 elif p2r >= basecache[1]:
1242 elif p2r >= basecache[1]:
1243 d = builddelta(p2r)
1243 d = builddelta(p2r)
1244 else:
1244 else:
1245 d = builddelta(prev)
1245 d = builddelta(prev)
1246 else:
1246 else:
1247 d = builddelta(prev)
1247 d = builddelta(prev)
1248 dist, l, data, base, chainbase, chainlen = d
1248 dist, l, data, base, chainbase, chainlen = d
1249
1249
1250 # full versions are inserted when the needed deltas
1250 # full versions are inserted when the needed deltas
1251 # become comparable to the uncompressed text
1251 # become comparable to the uncompressed text
1252 if text is None:
1252 if text is None:
1253 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1253 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1254 cachedelta[1])
1254 cachedelta[1])
1255 else:
1255 else:
1256 textlen = len(text)
1256 textlen = len(text)
1257 if (d is None or dist > textlen * 2 or
1257 if (d is None or dist > textlen * 2 or l > textlen or
1258 (self._maxchainlen and chainlen > self._maxchainlen)):
1258 (self._maxchainlen and chainlen > self._maxchainlen)):
1259 text = buildtext()
1259 text = buildtext()
1260 data = self.compress(text)
1260 data = self.compress(text)
1261 l = len(data[1]) + len(data[0])
1261 l = len(data[1]) + len(data[0])
1262 base = chainbase = curr
1262 base = chainbase = curr
1263
1263
1264 e = (offset_type(offset, flags), l, textlen,
1264 e = (offset_type(offset, flags), l, textlen,
1265 base, link, p1r, p2r, node)
1265 base, link, p1r, p2r, node)
1266 self.index.insert(-1, e)
1266 self.index.insert(-1, e)
1267 self.nodemap[node] = curr
1267 self.nodemap[node] = curr
1268
1268
1269 entry = self._io.packentry(e, self.node, self.version, curr)
1269 entry = self._io.packentry(e, self.node, self.version, curr)
1270 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1270 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1271
1271
1272 if type(text) == str: # only accept immutable objects
1272 if type(text) == str: # only accept immutable objects
1273 self._cache = (node, curr, text)
1273 self._cache = (node, curr, text)
1274 self._basecache = (curr, chainbase)
1274 self._basecache = (curr, chainbase)
1275 return node
1275 return node
1276
1276
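The condition this changeset touches is the store-fulltext decision: besides the existing chain-distance and chain-length limits, the new `l > textlen` clause discards a compressed delta that is larger than the fulltext itself (the case the test updates further down exercise). A predicate form of the rule, with illustrative argument names:

# Predicate form of the decision in _addrevision; maxchainlen may be
# None, meaning no chain-length limit. Argument names are illustrative.
def should_store_fulltext(d, dist, l, textlen, chainlen, maxchainlen):
    return (d is None                       # no delta could be built
            or dist > textlen * 2           # chain span dwarfs the text
            or l > textlen                  # new: delta bigger than text
            or (maxchainlen and chainlen > maxchainlen))

# a 70-byte compressed delta against a 66-byte fulltext is now rejected:
assert should_store_fulltext(d=object(), dist=100, l=70, textlen=66,
                             chainlen=1, maxchainlen=None)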
1277 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1277 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1278 curr = len(self) - 1
1278 curr = len(self) - 1
1279 if not self._inline:
1279 if not self._inline:
1280 transaction.add(self.datafile, offset)
1280 transaction.add(self.datafile, offset)
1281 transaction.add(self.indexfile, curr * len(entry))
1281 transaction.add(self.indexfile, curr * len(entry))
1282 if data[0]:
1282 if data[0]:
1283 dfh.write(data[0])
1283 dfh.write(data[0])
1284 dfh.write(data[1])
1284 dfh.write(data[1])
1285 dfh.flush()
1285 dfh.flush()
1286 ifh.write(entry)
1286 ifh.write(entry)
1287 else:
1287 else:
1288 offset += curr * self._io.size
1288 offset += curr * self._io.size
1289 transaction.add(self.indexfile, offset, curr)
1289 transaction.add(self.indexfile, offset, curr)
1290 ifh.write(entry)
1290 ifh.write(entry)
1291 ifh.write(data[0])
1291 ifh.write(data[0])
1292 ifh.write(data[1])
1292 ifh.write(data[1])
1293 self.checkinlinesize(transaction, ifh)
1293 self.checkinlinesize(transaction, ifh)
1294
1294
1295 def addgroup(self, bundle, linkmapper, transaction):
1295 def addgroup(self, bundle, linkmapper, transaction):
1296 """
1296 """
1297 add a delta group
1297 add a delta group
1298
1298
1299 given a set of deltas, add them to the revision log. the
1299 given a set of deltas, add them to the revision log. the
1300 first delta is against its parent, which should be in our
1300 first delta is against its parent, which should be in our
1301 log; the rest are against the previous delta.
1301 log; the rest are against the previous delta.
1302 """
1302 """
1303
1303
1304 # track the base of the current delta log
1304 # track the base of the current delta log
1305 content = []
1305 content = []
1306 node = None
1306 node = None
1307
1307
1308 r = len(self)
1308 r = len(self)
1309 end = 0
1309 end = 0
1310 if r:
1310 if r:
1311 end = self.end(r - 1)
1311 end = self.end(r - 1)
1312 ifh = self.opener(self.indexfile, "a+")
1312 ifh = self.opener(self.indexfile, "a+")
1313 isize = r * self._io.size
1313 isize = r * self._io.size
1314 if self._inline:
1314 if self._inline:
1315 transaction.add(self.indexfile, end + isize, r)
1315 transaction.add(self.indexfile, end + isize, r)
1316 dfh = None
1316 dfh = None
1317 else:
1317 else:
1318 transaction.add(self.indexfile, isize, r)
1318 transaction.add(self.indexfile, isize, r)
1319 transaction.add(self.datafile, end)
1319 transaction.add(self.datafile, end)
1320 dfh = self.opener(self.datafile, "a")
1320 dfh = self.opener(self.datafile, "a")
1321
1321
1322 try:
1322 try:
1323 # loop through our set of deltas
1323 # loop through our set of deltas
1324 chain = None
1324 chain = None
1325 while True:
1325 while True:
1326 chunkdata = bundle.deltachunk(chain)
1326 chunkdata = bundle.deltachunk(chain)
1327 if not chunkdata:
1327 if not chunkdata:
1328 break
1328 break
1329 node = chunkdata['node']
1329 node = chunkdata['node']
1330 p1 = chunkdata['p1']
1330 p1 = chunkdata['p1']
1331 p2 = chunkdata['p2']
1331 p2 = chunkdata['p2']
1332 cs = chunkdata['cs']
1332 cs = chunkdata['cs']
1333 deltabase = chunkdata['deltabase']
1333 deltabase = chunkdata['deltabase']
1334 delta = chunkdata['delta']
1334 delta = chunkdata['delta']
1335
1335
1336 content.append(node)
1336 content.append(node)
1337
1337
1338 link = linkmapper(cs)
1338 link = linkmapper(cs)
1339 if node in self.nodemap:
1339 if node in self.nodemap:
1340 # this can happen if two branches make the same change
1340 # this can happen if two branches make the same change
1341 chain = node
1341 chain = node
1342 continue
1342 continue
1343
1343
1344 for p in (p1, p2):
1344 for p in (p1, p2):
1345 if p not in self.nodemap:
1345 if p not in self.nodemap:
1346 raise LookupError(p, self.indexfile,
1346 raise LookupError(p, self.indexfile,
1347 _('unknown parent'))
1347 _('unknown parent'))
1348
1348
1349 if deltabase not in self.nodemap:
1349 if deltabase not in self.nodemap:
1350 raise LookupError(deltabase, self.indexfile,
1350 raise LookupError(deltabase, self.indexfile,
1351 _('unknown delta base'))
1351 _('unknown delta base'))
1352
1352
1353 baserev = self.rev(deltabase)
1353 baserev = self.rev(deltabase)
1354 chain = self._addrevision(node, None, transaction, link,
1354 chain = self._addrevision(node, None, transaction, link,
1355 p1, p2, (baserev, delta), ifh, dfh)
1355 p1, p2, (baserev, delta), ifh, dfh)
1356 if not dfh and not self._inline:
1356 if not dfh and not self._inline:
1357 # addrevision switched from inline to conventional
1357 # addrevision switched from inline to conventional
1358 # reopen the index
1358 # reopen the index
1359 ifh.close()
1359 ifh.close()
1360 dfh = self.opener(self.datafile, "a")
1360 dfh = self.opener(self.datafile, "a")
1361 ifh = self.opener(self.indexfile, "a")
1361 ifh = self.opener(self.indexfile, "a")
1362 finally:
1362 finally:
1363 if dfh:
1363 if dfh:
1364 dfh.close()
1364 dfh.close()
1365 ifh.close()
1365 ifh.close()
1366
1366
1367 return content
1367 return content
1368
1368
1369 def getstrippoint(self, minlink):
1369 def getstrippoint(self, minlink):
1370 """find the minimum rev that must be stripped to strip the linkrev
1370 """find the minimum rev that must be stripped to strip the linkrev
1371
1371
1372 Returns a tuple containing the minimum rev and a set of all revs that
1372 Returns a tuple containing the minimum rev and a set of all revs that
1373 have linkrevs that will be broken by this strip.
1373 have linkrevs that will be broken by this strip.
1374 """
1374 """
1375 brokenrevs = set()
1375 brokenrevs = set()
1376 strippoint = len(self)
1376 strippoint = len(self)
1377
1377
1378 heads = {}
1378 heads = {}
1379 futurelargelinkrevs = set()
1379 futurelargelinkrevs = set()
1380 for head in self.headrevs():
1380 for head in self.headrevs():
1381 headlinkrev = self.linkrev(head)
1381 headlinkrev = self.linkrev(head)
1382 heads[head] = headlinkrev
1382 heads[head] = headlinkrev
1383 if headlinkrev >= minlink:
1383 if headlinkrev >= minlink:
1384 futurelargelinkrevs.add(headlinkrev)
1384 futurelargelinkrevs.add(headlinkrev)
1385
1385
1386 # This algorithm involves walking down the rev graph, starting at the
1386 # This algorithm involves walking down the rev graph, starting at the
1387 # heads. Since the revs are topologically sorted according to linkrev,
1387 # heads. Since the revs are topologically sorted according to linkrev,
1388 # once all head linkrevs are below the minlink, we know there are
1388 # once all head linkrevs are below the minlink, we know there are
1389 # no more revs that could have a linkrev greater than or equal to minlink.
1389 # no more revs that could have a linkrev greater than or equal to minlink.
1390 # So we can stop walking.
1390 # So we can stop walking.
1391 while futurelargelinkrevs:
1391 while futurelargelinkrevs:
1392 strippoint -= 1
1392 strippoint -= 1
1393 linkrev = heads.pop(strippoint)
1393 linkrev = heads.pop(strippoint)
1394
1394
1395 if linkrev < minlink:
1395 if linkrev < minlink:
1396 brokenrevs.add(strippoint)
1396 brokenrevs.add(strippoint)
1397 else:
1397 else:
1398 futurelargelinkrevs.remove(linkrev)
1398 futurelargelinkrevs.remove(linkrev)
1399
1399
1400 for p in self.parentrevs(strippoint):
1400 for p in self.parentrevs(strippoint):
1401 if p != nullrev:
1401 if p != nullrev:
1402 plinkrev = self.linkrev(p)
1402 plinkrev = self.linkrev(p)
1403 heads[p] = plinkrev
1403 heads[p] = plinkrev
1404 if plinkrev >= minlink:
1404 if plinkrev >= minlink:
1405 futurelargelinkrevs.add(plinkrev)
1405 futurelargelinkrevs.add(plinkrev)
1406
1406
1407 return strippoint, brokenrevs
1407 return strippoint, brokenrevs
1408
1408
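getstrippoint walks from the heads downward, keeping the set of linkrevs that are still >= minlink; when that set empties, every remaining rev links below minlink and the walk stops. A worked toy run over a linear history where rev i has linkrev i and parent i - 1:

# Toy run of the getstrippoint walk on a linear, single-parent log.
def toystrippoint(nrevs, linkrev, minlink):
    strippoint = nrevs
    heads = {nrevs - 1: linkrev(nrevs - 1)}
    future = set(l for l in heads.values() if l >= minlink)
    brokenrevs = set()
    while future:
        strippoint -= 1
        l = heads.pop(strippoint)
        if l < minlink:
            brokenrevs.add(strippoint)
        else:
            future.remove(l)
        p = strippoint - 1              # the single parent in this toy
        if p >= 0:
            heads[p] = linkrev(p)
            if linkrev(p) >= minlink:
                future.add(linkrev(p))
    return strippoint, brokenrevs

# stripping linkrev >= 3 from 5 revs truncates at rev 3, nothing broken:
assert toystrippoint(5, lambda r: r, minlink=3) == (3, set())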
1409 def strip(self, minlink, transaction):
1409 def strip(self, minlink, transaction):
1410 """truncate the revlog on the first revision with a linkrev >= minlink
1410 """truncate the revlog on the first revision with a linkrev >= minlink
1411
1411
1412 This function is called when we're stripping revision minlink and
1412 This function is called when we're stripping revision minlink and
1413 its descendants from the repository.
1413 its descendants from the repository.
1414
1414
1415 We have to remove all revisions with linkrev >= minlink, because
1415 We have to remove all revisions with linkrev >= minlink, because
1416 the equivalent changelog revisions will be renumbered after the
1416 the equivalent changelog revisions will be renumbered after the
1417 strip.
1417 strip.
1418
1418
1419 So we truncate the revlog on the first of these revisions, and
1419 So we truncate the revlog on the first of these revisions, and
1420 trust that the caller has saved the revisions that shouldn't be
1420 trust that the caller has saved the revisions that shouldn't be
1421 removed and that it'll re-add them after this truncation.
1421 removed and that it'll re-add them after this truncation.
1422 """
1422 """
1423 if len(self) == 0:
1423 if len(self) == 0:
1424 return
1424 return
1425
1425
1426 rev, _ = self.getstrippoint(minlink)
1426 rev, _ = self.getstrippoint(minlink)
1427 if rev == len(self):
1427 if rev == len(self):
1428 return
1428 return
1429
1429
1430 # first truncate the files on disk
1430 # first truncate the files on disk
1431 end = self.start(rev)
1431 end = self.start(rev)
1432 if not self._inline:
1432 if not self._inline:
1433 transaction.add(self.datafile, end)
1433 transaction.add(self.datafile, end)
1434 end = rev * self._io.size
1434 end = rev * self._io.size
1435 else:
1435 else:
1436 end += rev * self._io.size
1436 end += rev * self._io.size
1437
1437
1438 transaction.add(self.indexfile, end)
1438 transaction.add(self.indexfile, end)
1439
1439
1440 # then reset internal state in memory to forget those revisions
1440 # then reset internal state in memory to forget those revisions
1441 self._cache = None
1441 self._cache = None
1442 self._chunkclear()
1442 self._chunkclear()
1443 for x in xrange(rev, len(self)):
1443 for x in xrange(rev, len(self)):
1444 del self.nodemap[self.node(x)]
1444 del self.nodemap[self.node(x)]
1445
1445
1446 del self.index[rev:-1]
1446 del self.index[rev:-1]
1447
1447
1448 def checksize(self):
1448 def checksize(self):
1449 expected = 0
1449 expected = 0
1450 if len(self):
1450 if len(self):
1451 expected = max(0, self.end(len(self) - 1))
1451 expected = max(0, self.end(len(self) - 1))
1452
1452
1453 try:
1453 try:
1454 f = self.opener(self.datafile)
1454 f = self.opener(self.datafile)
1455 f.seek(0, 2)
1455 f.seek(0, 2)
1456 actual = f.tell()
1456 actual = f.tell()
1457 f.close()
1457 f.close()
1458 dd = actual - expected
1458 dd = actual - expected
1459 except IOError, inst:
1459 except IOError, inst:
1460 if inst.errno != errno.ENOENT:
1460 if inst.errno != errno.ENOENT:
1461 raise
1461 raise
1462 dd = 0
1462 dd = 0
1463
1463
1464 try:
1464 try:
1465 f = self.opener(self.indexfile)
1465 f = self.opener(self.indexfile)
1466 f.seek(0, 2)
1466 f.seek(0, 2)
1467 actual = f.tell()
1467 actual = f.tell()
1468 f.close()
1468 f.close()
1469 s = self._io.size
1469 s = self._io.size
1470 i = max(0, actual // s)
1470 i = max(0, actual // s)
1471 di = actual - (i * s)
1471 di = actual - (i * s)
1472 if self._inline:
1472 if self._inline:
1473 databytes = 0
1473 databytes = 0
1474 for r in self:
1474 for r in self:
1475 databytes += max(0, self.length(r))
1475 databytes += max(0, self.length(r))
1476 dd = 0
1476 dd = 0
1477 di = actual - len(self) * s - databytes
1477 di = actual - len(self) * s - databytes
1478 except IOError, inst:
1478 except IOError, inst:
1479 if inst.errno != errno.ENOENT:
1479 if inst.errno != errno.ENOENT:
1480 raise
1480 raise
1481 di = 0
1481 di = 0
1482
1482
1483 return (dd, di)
1483 return (dd, di)
1484
1484
1485 def files(self):
1485 def files(self):
1486 res = [self.indexfile]
1486 res = [self.indexfile]
1487 if not self._inline:
1487 if not self._inline:
1488 res.append(self.datafile)
1488 res.append(self.datafile)
1489 return res
1489 return res
@@ -1,216 +1,216 b''
1
1
2 $ mkdir part1
2 $ mkdir part1
3 $ cd part1
3 $ cd part1
4
4
5 $ hg init
5 $ hg init
6 $ echo a > a
6 $ echo a > a
7 $ hg add a
7 $ hg add a
8 $ hg commit -m "1"
8 $ hg commit -m "1"
9 $ hg status
9 $ hg status
10 $ hg copy a b
10 $ hg copy a b
11 $ hg --config ui.portablefilenames=abort copy a con.xml
11 $ hg --config ui.portablefilenames=abort copy a con.xml
12 abort: filename contains 'con', which is reserved on Windows: 'con.xml'
12 abort: filename contains 'con', which is reserved on Windows: 'con.xml'
13 [255]
13 [255]
14 $ hg status
14 $ hg status
15 A b
15 A b
16 $ hg sum
16 $ hg sum
17 parent: 0:c19d34741b0a tip
17 parent: 0:c19d34741b0a tip
18 1
18 1
19 branch: default
19 branch: default
20 commit: 1 copied
20 commit: 1 copied
21 update: (current)
21 update: (current)
22 $ hg --debug commit -m "2"
22 $ hg --debug commit -m "2"
23 b
23 b
24 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
24 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
25 committed changeset 1:93580a2c28a50a56f63526fb305067e6fbf739c4
25 committed changeset 1:93580a2c28a50a56f63526fb305067e6fbf739c4
26
26
27 we should see two history entries
27 we should see two history entries
28
28
29 $ hg history -v
29 $ hg history -v
30 changeset: 1:93580a2c28a5
30 changeset: 1:93580a2c28a5
31 tag: tip
31 tag: tip
32 user: test
32 user: test
33 date: Thu Jan 01 00:00:00 1970 +0000
33 date: Thu Jan 01 00:00:00 1970 +0000
34 files: b
34 files: b
35 description:
35 description:
36 2
36 2
37
37
38
38
39 changeset: 0:c19d34741b0a
39 changeset: 0:c19d34741b0a
40 user: test
40 user: test
41 date: Thu Jan 01 00:00:00 1970 +0000
41 date: Thu Jan 01 00:00:00 1970 +0000
42 files: a
42 files: a
43 description:
43 description:
44 1
44 1
45
45
46
46
47
47
48 we should see one log entry for a
48 we should see one log entry for a
49
49
50 $ hg log a
50 $ hg log a
51 changeset: 0:c19d34741b0a
51 changeset: 0:c19d34741b0a
52 user: test
52 user: test
53 date: Thu Jan 01 00:00:00 1970 +0000
53 date: Thu Jan 01 00:00:00 1970 +0000
54 summary: 1
54 summary: 1
55
55
56
56
57 this should show a revision linked to changeset 0
57 this should show a revision linked to changeset 0
58
58
59 $ hg debugindex a
59 $ hg debugindex a
60 rev offset length ..... linkrev nodeid p1 p2 (re)
60 rev offset length ..... linkrev nodeid p1 p2 (re)
61 0 0 3 ..... 0 b789fdd96dc2 000000000000 000000000000 (re)
61 0 0 3 ..... 0 b789fdd96dc2 000000000000 000000000000 (re)
62
62
63 we should see one log entry for b
63 we should see one log entry for b
64
64
65 $ hg log b
65 $ hg log b
66 changeset: 1:93580a2c28a5
66 changeset: 1:93580a2c28a5
67 tag: tip
67 tag: tip
68 user: test
68 user: test
69 date: Thu Jan 01 00:00:00 1970 +0000
69 date: Thu Jan 01 00:00:00 1970 +0000
70 summary: 2
70 summary: 2
71
71
72
72
73 this should show a revision linked to changeset 1
73 this should show a revision linked to changeset 1
74
74
75 $ hg debugindex b
75 $ hg debugindex b
76 rev offset length ..... linkrev nodeid p1 p2 (re)
76 rev offset length ..... linkrev nodeid p1 p2 (re)
77 0 0 65 ..... 1 37d9b5d994ea 000000000000 000000000000 (re)
77 0 0 65 ..... 1 37d9b5d994ea 000000000000 000000000000 (re)
78
78
79 this should show the rename information in the metadata
79 this should show the rename information in the metadata
80
80
81 $ hg debugdata b 0 | head -3 | tail -2
81 $ hg debugdata b 0 | head -3 | tail -2
82 copy: a
82 copy: a
83 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
83 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
84
84
85 $ "$TESTDIR/md5sum.py" .hg/store/data/b.i
85 $ "$TESTDIR/md5sum.py" .hg/store/data/b.i
86 4999f120a3b88713bbefddd195cf5133 .hg/store/data/b.i
86 4999f120a3b88713bbefddd195cf5133 .hg/store/data/b.i
87 $ hg cat b > bsum
87 $ hg cat b > bsum
88 $ "$TESTDIR/md5sum.py" bsum
88 $ "$TESTDIR/md5sum.py" bsum
89 60b725f10c9c85c70d97880dfe8191b3 bsum
89 60b725f10c9c85c70d97880dfe8191b3 bsum
90 $ hg cat a > asum
90 $ hg cat a > asum
91 $ "$TESTDIR/md5sum.py" asum
91 $ "$TESTDIR/md5sum.py" asum
92 60b725f10c9c85c70d97880dfe8191b3 asum
92 60b725f10c9c85c70d97880dfe8191b3 asum
93 $ hg verify
93 $ hg verify
94 checking changesets
94 checking changesets
95 checking manifests
95 checking manifests
96 crosschecking files in changesets and manifests
96 crosschecking files in changesets and manifests
97 checking files
97 checking files
98 2 files, 2 changesets, 2 total revisions
98 2 files, 2 changesets, 2 total revisions
99
99
100 $ cd ..
100 $ cd ..
101
101
102
102
103 $ mkdir part2
103 $ mkdir part2
104 $ cd part2
104 $ cd part2
105
105
106 $ hg init
106 $ hg init
107 $ echo foo > foo
107 $ echo foo > foo
108 should fail - foo is not managed
108 should fail - foo is not managed
109 $ hg mv foo bar
109 $ hg mv foo bar
110 foo: not copying - file is not managed
110 foo: not copying - file is not managed
111 abort: no files to copy
111 abort: no files to copy
112 [255]
112 [255]
113 $ hg st -A
113 $ hg st -A
114 ? foo
114 ? foo
115 $ hg add foo
115 $ hg add foo
116 dry-run; print a warning that this is not a real copy; foo is added
116 dry-run; print a warning that this is not a real copy; foo is added
117 $ hg mv --dry-run foo bar
117 $ hg mv --dry-run foo bar
118 foo has not been committed yet, so no copy data will be stored for bar.
118 foo has not been committed yet, so no copy data will be stored for bar.
119 $ hg st -A
119 $ hg st -A
120 A foo
120 A foo
121 should print a warning that this is not a real copy; bar is added
121 should print a warning that this is not a real copy; bar is added
122 $ hg mv foo bar
122 $ hg mv foo bar
123 foo has not been committed yet, so no copy data will be stored for bar.
123 foo has not been committed yet, so no copy data will be stored for bar.
124 $ hg st -A
124 $ hg st -A
125 A bar
125 A bar
126 should print a warning that this is not a real copy; foo is added
126 should print a warning that this is not a real copy; foo is added
127 $ hg cp bar foo
127 $ hg cp bar foo
128 bar has not been committed yet, so no copy data will be stored for foo.
128 bar has not been committed yet, so no copy data will be stored for foo.
129 $ hg rm -f bar
129 $ hg rm -f bar
130 $ rm bar
130 $ rm bar
131 $ hg st -A
131 $ hg st -A
132 A foo
132 A foo
133 $ hg commit -m1
133 $ hg commit -m1
134
134
135 moving a missing file
135 moving a missing file
136 $ rm foo
136 $ rm foo
137 $ hg mv foo foo3
137 $ hg mv foo foo3
138 foo: deleted in working copy
138 foo: deleted in working copy
139 foo3 does not exist!
139 foo3 does not exist!
140 $ hg up -qC .
140 $ hg up -qC .
141
141
142 copy --after to a nonexistent target filename
142 copy --after to a nonexistent target filename
143 $ hg cp -A foo dummy
143 $ hg cp -A foo dummy
144 foo: not recording copy - dummy does not exist
144 foo: not recording copy - dummy does not exist
145
145
146 dry-run; should show that foo is clean
146 dry-run; should show that foo is clean
147 $ hg copy --dry-run foo bar
147 $ hg copy --dry-run foo bar
148 $ hg st -A
148 $ hg st -A
149 C foo
149 C foo
150 should show copy
150 should show copy
151 $ hg copy foo bar
151 $ hg copy foo bar
152 $ hg st -C
152 $ hg st -C
153 A bar
153 A bar
154 foo
154 foo
155
155
156 shouldn't show copy
156 shouldn't show copy
157 $ hg commit -m2
157 $ hg commit -m2
158 $ hg st -C
158 $ hg st -C
159
159
160 should match
160 should match
161 $ hg debugindex foo
161 $ hg debugindex foo
162 rev offset length ..... linkrev nodeid p1 p2 (re)
162 rev offset length ..... linkrev nodeid p1 p2 (re)
163 0 0 5 ..... 0 2ed2a3912a0b 000000000000 000000000000 (re)
163 0 0 5 ..... 0 2ed2a3912a0b 000000000000 000000000000 (re)
164 $ hg debugrename bar
164 $ hg debugrename bar
165 bar renamed from foo:2ed2a3912a0b24502043eae84ee4b279c18b90dd
165 bar renamed from foo:2ed2a3912a0b24502043eae84ee4b279c18b90dd
166
166
167 $ echo bleah > foo
167 $ echo bleah > foo
168 $ echo quux > bar
168 $ echo quux > bar
169 $ hg commit -m3
169 $ hg commit -m3
170
170
171 should not be renamed
171 should not be renamed
172 $ hg debugrename bar
172 $ hg debugrename bar
173 bar not renamed
173 bar not renamed
174
174
175 $ hg copy -f foo bar
175 $ hg copy -f foo bar
176 should show copy
176 should show copy
177 $ hg st -C
177 $ hg st -C
178 M bar
178 M bar
179 foo
179 foo
180 $ hg commit -m3
180 $ hg commit -m3
181
181
182 should show no parents for tip
182 should show no parents for tip
183 $ hg debugindex bar
183 $ hg debugindex bar
184 rev offset length ..... linkrev nodeid p1 p2 (re)
184 rev offset length ..... linkrev nodeid p1 p2 (re)
185 0 0 69 ..... 1 7711d36246cc 000000000000 000000000000 (re)
185 0 0 69 ..... 1 7711d36246cc 000000000000 000000000000 (re)
186 1 69 6 ..... 2 bdf70a2b8d03 7711d36246cc 000000000000 (re)
186 1 69 6 ..... 2 bdf70a2b8d03 7711d36246cc 000000000000 (re)
187 2 75 81 ..... 3 b2558327ea8d 000000000000 000000000000 (re)
187 2 75 71 ..... 3 b2558327ea8d 000000000000 000000000000 (re)
188 should match
188 should match
189 $ hg debugindex foo
189 $ hg debugindex foo
190 rev offset length ..... linkrev nodeid p1 p2 (re)
190 rev offset length ..... linkrev nodeid p1 p2 (re)
191 0 0 5 ..... 0 2ed2a3912a0b 000000000000 000000000000 (re)
191 0 0 5 ..... 0 2ed2a3912a0b 000000000000 000000000000 (re)
192 1 5 7 ..... 2 dd12c926cf16 2ed2a3912a0b 000000000000 (re)
192 1 5 7 ..... 2 dd12c926cf16 2ed2a3912a0b 000000000000 (re)
193 $ hg debugrename bar
193 $ hg debugrename bar
194 bar renamed from foo:dd12c926cf165e3eb4cf87b084955cb617221c17
194 bar renamed from foo:dd12c926cf165e3eb4cf87b084955cb617221c17
195
195
196 should show no copies
196 should show no copies
197 $ hg st -C
197 $ hg st -C
198
198
199 copy --after on an added file
199 copy --after on an added file
200 $ cp bar baz
200 $ cp bar baz
201 $ hg add baz
201 $ hg add baz
202 $ hg cp -A bar baz
202 $ hg cp -A bar baz
203 $ hg st -C
203 $ hg st -C
204 A baz
204 A baz
205 bar
205 bar
206
206
207 foo was clean:
207 foo was clean:
208 $ hg st -AC foo
208 $ hg st -AC foo
209 C foo
209 C foo
210 but it's considered modified after a copy --after --force
210 but it's considered modified after a copy --after --force
211 $ hg copy -Af bar foo
211 $ hg copy -Af bar foo
212 $ hg st -AC foo
212 $ hg st -AC foo
213 M foo
213 M foo
214 bar
214 bar
215
215
216 $ cd ..
216 $ cd ..
@@ -1,755 +1,755 b''
1 $ cat >> $HGRCPATH << EOF
1 $ cat >> $HGRCPATH << EOF
2 > [phases]
2 > [phases]
3 > # public changeset are not obsolete
3 > # public changeset are not obsolete
4 > publish=false
4 > publish=false
5 > [ui]
5 > [ui]
6 > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
6 > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
7 > EOF
7 > EOF
8 $ mkcommit() {
8 $ mkcommit() {
9 > echo "$1" > "$1"
9 > echo "$1" > "$1"
10 > hg add "$1"
10 > hg add "$1"
11 > hg ci -m "add $1"
11 > hg ci -m "add $1"
12 > }
12 > }
13 $ getid() {
13 $ getid() {
14 > hg id --debug --hidden -ir "desc('$1')"
14 > hg id --debug --hidden -ir "desc('$1')"
15 > }
15 > }
16
16
17 $ cat > debugkeys.py <<EOF
17 $ cat > debugkeys.py <<EOF
18 > def reposetup(ui, repo):
18 > def reposetup(ui, repo):
19 > class debugkeysrepo(repo.__class__):
19 > class debugkeysrepo(repo.__class__):
20 > def listkeys(self, namespace):
20 > def listkeys(self, namespace):
21 > ui.write('listkeys %s\n' % (namespace,))
21 > ui.write('listkeys %s\n' % (namespace,))
22 > return super(debugkeysrepo, self).listkeys(namespace)
22 > return super(debugkeysrepo, self).listkeys(namespace)
23 >
23 >
24 > if repo.local():
24 > if repo.local():
25 > repo.__class__ = debugkeysrepo
25 > repo.__class__ = debugkeysrepo
26 > EOF
26 > EOF
27
27
28 $ hg init tmpa
28 $ hg init tmpa
29 $ cd tmpa
29 $ cd tmpa
30 $ mkcommit kill_me
30 $ mkcommit kill_me
31
31
32 Checking that the feature is properly disabled
32 Checking that the feature is properly disabled
33
33
34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 abort: creating obsolete markers is not enabled on this repo
35 abort: creating obsolete markers is not enabled on this repo
36 [255]
36 [255]
37
37
38 Enabling it
38 Enabling it
39
39
40 $ cat >> $HGRCPATH << EOF
40 $ cat >> $HGRCPATH << EOF
41 > [experimental]
41 > [experimental]
42 > evolution=createmarkers,exchange
42 > evolution=createmarkers,exchange
43 > EOF
43 > EOF
44
44
45 Killing a single changeset without replacement
45 Killing a single changeset without replacement
46
46
47 $ hg debugobsolete 0
47 $ hg debugobsolete 0
48 abort: changeset references must be full hexadecimal node identifiers
48 abort: changeset references must be full hexadecimal node identifiers
49 [255]
49 [255]
50 $ hg debugobsolete '00'
50 $ hg debugobsolete '00'
51 abort: changeset references must be full hexadecimal node identifiers
51 abort: changeset references must be full hexadecimal node identifiers
52 [255]
52 [255]
53 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
53 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
54 $ hg debugobsolete
54 $ hg debugobsolete
55 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
55 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
56
56
57 (test that mercurial is not confused)
57 (test that mercurial is not confused)
58
58
59 $ hg up null --quiet # having 0 as parent prevents it from being hidden
59 $ hg up null --quiet # having 0 as parent prevents it from being hidden
60 $ hg tip
60 $ hg tip
61 -1:000000000000 (public) [tip ]
61 -1:000000000000 (public) [tip ]
62 $ hg up --hidden tip --quiet
62 $ hg up --hidden tip --quiet
63
63
64 Killing a single changeset with itself should fail
64 Killing a single changeset with itself should fail
65 (simple local safeguard)
65 (simple local safeguard)
66
66
67 $ hg debugobsolete `getid kill_me` `getid kill_me`
67 $ hg debugobsolete `getid kill_me` `getid kill_me`
68 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
68 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
69 [255]
69 [255]
70
70
71 $ cd ..
71 $ cd ..
72
72
73 Killing a single changeset with replacement
73 Killing a single changeset with replacement
74 (and testing the format option)
74 (and testing the format option)
75
75
76 $ hg init tmpb
76 $ hg init tmpb
77 $ cd tmpb
77 $ cd tmpb
78 $ mkcommit a
78 $ mkcommit a
79 $ mkcommit b
79 $ mkcommit b
80 $ mkcommit original_c
80 $ mkcommit original_c
81 $ hg up "desc('b')"
81 $ hg up "desc('b')"
82 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
82 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
83 $ mkcommit new_c
83 $ mkcommit new_c
84 created new head
84 created new head
85 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
85 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
86 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120'
86 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120'
87 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
87 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
88 2:245bde4270cd add original_c
88 2:245bde4270cd add original_c
89 $ hg debugrevlog -cd
89 $ hg debugrevlog -cd
90 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
90 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
91 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
91 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
92 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
92 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
93 2 1 -1 118 204 59 59 59 0 76 192 0 1 1
93 2 1 -1 118 193 118 118 59 0 76 192 0 1 0
94 3 1 -1 204 271 204 204 59 0 66 258 0 2 0
94 3 1 -1 193 260 193 193 59 0 66 258 0 2 0
95 $ hg debugobsolete
95 $ hg debugobsolete
96 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
96 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
97
97
98 (check for version number of the obsstore)
98 (check for version number of the obsstore)
99
99
100 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
100 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
101 \x00 (no-eol) (esc)
101 \x00 (no-eol) (esc)
102
102
103 do it again (it reads the obsstore before adding a new changeset)
103 do it again (it reads the obsstore before adding a new changeset)
104
104
105 $ hg up '.^'
105 $ hg up '.^'
106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
107 $ mkcommit new_2_c
107 $ mkcommit new_2_c
108 created new head
108 created new head
109 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
109 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
110 $ hg debugobsolete
110 $ hg debugobsolete
111 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
111 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
112 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
112 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
113
113
114 Register two markers with a missing node
114 Register two markers with a missing node
115
115
116 $ hg up '.^'
116 $ hg up '.^'
117 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
117 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
118 $ mkcommit new_3_c
118 $ mkcommit new_3_c
119 created new head
119 created new head
120 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
120 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
121 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
121 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
122 $ hg debugobsolete
122 $ hg debugobsolete
123 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
123 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
124 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
124 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
125 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
125 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
126 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
126 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
127
127
128 Refuse pathological nullid successors
128 Refuse pathological nullid successors
129 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
129 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
130 transaction abort!
130 transaction abort!
131 rollback completed
131 rollback completed
132 abort: bad obsolescence marker detected: invalid successors nullid
132 abort: bad obsolescence marker detected: invalid successors nullid
133 [255]
133 [255]
134
134
135 Check that graphlog detects that a changeset is obsolete:
135 Check that graphlog detects that a changeset is obsolete:
136
136
137 $ hg log -G
137 $ hg log -G
138 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
138 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
139 |
139 |
140 o 1:7c3bad9141dc (draft) [ ] add b
140 o 1:7c3bad9141dc (draft) [ ] add b
141 |
141 |
142 o 0:1f0dee641bb7 (draft) [ ] add a
142 o 0:1f0dee641bb7 (draft) [ ] add a
143
143
144
144
145 check that heads does not report them
145 check that heads does not report them
146
146
147 $ hg heads
147 $ hg heads
148 5:5601fb93a350 (draft) [tip ] add new_3_c
148 5:5601fb93a350 (draft) [tip ] add new_3_c
149 $ hg heads --hidden
149 $ hg heads --hidden
150 5:5601fb93a350 (draft) [tip ] add new_3_c
150 5:5601fb93a350 (draft) [tip ] add new_3_c
151 4:ca819180edb9 (draft) [ ] add new_2_c
151 4:ca819180edb9 (draft) [ ] add new_2_c
152 3:cdbce2fbb163 (draft) [ ] add new_c
152 3:cdbce2fbb163 (draft) [ ] add new_c
153 2:245bde4270cd (draft) [ ] add original_c
153 2:245bde4270cd (draft) [ ] add original_c
154
154
155
155
156 check that summary does not report them
156 check that summary does not report them
157
157
158 $ hg init ../sink
158 $ hg init ../sink
159 $ echo '[paths]' >> .hg/hgrc
159 $ echo '[paths]' >> .hg/hgrc
160 $ echo 'default=../sink' >> .hg/hgrc
160 $ echo 'default=../sink' >> .hg/hgrc
161 $ hg summary --remote
161 $ hg summary --remote
162 parent: 5:5601fb93a350 tip
162 parent: 5:5601fb93a350 tip
163 add new_3_c
163 add new_3_c
164 branch: default
164 branch: default
165 commit: (clean)
165 commit: (clean)
166 update: (current)
166 update: (current)
167 remote: 3 outgoing
167 remote: 3 outgoing
168
168
169 $ hg summary --remote --hidden
169 $ hg summary --remote --hidden
170 parent: 5:5601fb93a350 tip
170 parent: 5:5601fb93a350 tip
171 add new_3_c
171 add new_3_c
172 branch: default
172 branch: default
173 commit: (clean)
173 commit: (clean)
174 update: 3 new changesets, 4 branch heads (merge)
174 update: 3 new changesets, 4 branch heads (merge)
175 remote: 3 outgoing
175 remote: 3 outgoing
176
176
177 check that various commands work well with filtering
177 check that various commands work well with filtering
178
178
179 $ hg tip
179 $ hg tip
180 5:5601fb93a350 (draft) [tip ] add new_3_c
180 5:5601fb93a350 (draft) [tip ] add new_3_c
181 $ hg log -r 6
181 $ hg log -r 6
182 abort: unknown revision '6'!
182 abort: unknown revision '6'!
183 [255]
183 [255]
184 $ hg log -r 4
184 $ hg log -r 4
185 abort: hidden revision '4'!
185 abort: hidden revision '4'!
186 (use --hidden to access hidden revisions)
186 (use --hidden to access hidden revisions)
187 [255]
187 [255]
188 $ hg debugrevspec 'rev(6)'
188 $ hg debugrevspec 'rev(6)'
189 $ hg debugrevspec 'rev(4)'
189 $ hg debugrevspec 'rev(4)'
190
190
191 Check that public changesets are not accounted as obsolete:
191 Check that public changesets are not accounted as obsolete:
192
192
193 $ hg --hidden phase --public 2
193 $ hg --hidden phase --public 2
194 $ hg log -G
194 $ hg log -G
195 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
195 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
196 |
196 |
197 | o 2:245bde4270cd (public) [ ] add original_c
197 | o 2:245bde4270cd (public) [ ] add original_c
198 |/
198 |/
199 o 1:7c3bad9141dc (public) [ ] add b
199 o 1:7c3bad9141dc (public) [ ] add b
200 |
200 |
201 o 0:1f0dee641bb7 (public) [ ] add a
201 o 0:1f0dee641bb7 (public) [ ] add a
202
202
203
203
204 And that bumped changesets are detected
204 And that bumped changesets are detected
205 --------------------------------------
205 --------------------------------------
206
206
207 If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
207 If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
208 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
208 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
209 the public changeset
209 the public changeset
210
210
211 $ hg log --hidden -r 'bumped()'
211 $ hg log --hidden -r 'bumped()'
212 5:5601fb93a350 (draft) [tip ] add new_3_c
212 5:5601fb93a350 (draft) [tip ] add new_3_c
213
213
214 And that we can't push bumped changesets
214 And that we can't push bumped changesets
215
215
216 $ hg push ../tmpa -r 0 --force #(make repo related)
216 $ hg push ../tmpa -r 0 --force #(make repo related)
217 pushing to ../tmpa
217 pushing to ../tmpa
218 searching for changes
218 searching for changes
219 warning: repository is unrelated
219 warning: repository is unrelated
220 adding changesets
220 adding changesets
221 adding manifests
221 adding manifests
222 adding file changes
222 adding file changes
223 added 1 changesets with 1 changes to 1 files (+1 heads)
223 added 1 changesets with 1 changes to 1 files (+1 heads)
224 $ hg push ../tmpa
224 $ hg push ../tmpa
225 pushing to ../tmpa
225 pushing to ../tmpa
226 searching for changes
226 searching for changes
227 abort: push includes bumped changeset: 5601fb93a350!
227 abort: push includes bumped changeset: 5601fb93a350!
228 [255]
228 [255]
229
229
Fixing "bumped" situation
We need to create a clone of 5 and add a special marker with a flag

$ hg up '5^'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg revert -ar 5
adding new_3_c
$ hg ci -m 'add n3w_3_c'
created new head
$ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
$ hg log -r 'bumped()'
$ hg log -G
@ 6:6f9641995072 (draft) [tip ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a




$ cd ..

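The --flags 1 argument above sets a bit in the new marker's flags field,
and that bit is what clears the bumped state, as the empty bumped() query
shows. A sketch of such a flag check (the value comes from the command
line above; the constant name is an assumption, not verified here):

    BUMPEDFIX = 1  # flag bit set by the marker created with --flags 1

    def marker_fixes_bumped(flags):
        # a marker carrying this bit records an explicit resolution of
        # a bumped changeset rather than an ordinary rewrite
        return bool(flags & BUMPEDFIX)
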
Exchange Test
============================

Destination repo does not have any data
---------------------------------------

Simple incoming test

$ hg init tmpc
$ cd tmpc
$ hg incoming ../tmpb
comparing with ../tmpb
0:1f0dee641bb7 (public) [ ] add a
1:7c3bad9141dc (public) [ ] add b
2:245bde4270cd (public) [ ] add original_c
6:6f9641995072 (draft) [tip ] add n3w_3_c

Try to pull markers
(extinct changesets are excluded but markers are pushed)

$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}

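Each debugobsolete line above has the same shape: precursor hash,
successor hash(es), a flags field, a date, and a metadata dict. A toy
record for the third marker, hand-built for illustration (Mercurial's
internal marker type differs):

    from collections import namedtuple

    Marker = namedtuple('Marker', 'precursor successors flags date metadata')

    m = Marker(precursor='ca819180edb99ed25ceafb3e9584ac287e240b00',
               successors=('1337133713371337133713371337133713371337',),
               flags=0,
               date=(1338, 0),  # seconds and tz offset -> 00:22:18
               metadata={'user': 'test'})
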
Rollback/transaction support

$ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
$ hg rollback -n
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg rollback
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}

$ cd ..

Try to push markers

$ hg init tmpd
$ hg -R tmpb push tmpd
pushing to tmpd
searching for changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
$ hg -R tmpd debugobsolete | sort
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

Check that obsolete keys are exchanged only if the source has an obsolete store

$ hg init empty
$ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
pushing to tmpd
listkeys phases
listkeys bookmarks
no changes found
listkeys phases
[1]

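The debugkeys.py fixture itself is not shown here. Reconstructed from the
"listkeys <namespace>" lines it prints, it plausibly wraps the repo's
listkeys like the sketch below (an assumption, not the verbatim fixture;
reposetup is the standard extension hook):

    # debugkeys.py (sketch)
    def reposetup(ui, repo):
        class debugkeysrepo(repo.__class__):
            def listkeys(self, namespace):
                ui.write('listkeys %s\n' % namespace)
                return super(debugkeysrepo, self).listkeys(namespace)

        if repo.local():
            repo.__class__ = debugkeysrepo
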
clone support
(markers are copied and extinct changesets are included to allow hardlinks)

$ hg clone tmpb clone-dest
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R clone-dest log -G --hidden
@ 6:6f9641995072 (draft) [tip ] add n3w_3_c
|
| x 5:5601fb93a350 (draft) [ ] add new_3_c
|/
| x 4:ca819180edb9 (draft) [ ] add new_2_c
|/
| x 3:cdbce2fbb163 (draft) [ ] add new_c
|/
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a

$ hg -R clone-dest debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}


Destination repo has existing data
---------------------------------------

On pull

$ hg init tmpe
$ cd tmpe
$ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}


On push

$ hg push ../tmpc
pushing to ../tmpc
searching for changes
no changes found
[1]
$ hg -R ../tmpc debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}

detect outgoing obsolete and unstable
---------------------------------------


$ hg log -G
o 3:6f9641995072 (draft) [tip ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a

$ hg up 'desc("n3w_3_c")'
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ mkcommit original_d
$ mkcommit original_e
$ hg debugobsolete --record-parents `getid original_d` -d '0 0'
$ hg debugobsolete | grep `getid original_d`
94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
$ hg log -r 'obsolete()'
4:94b33453f93b (draft) [ ] add original_d
$ hg log -G -r '::unstable()'
@ 5:cda648ca50f5 (draft) [tip ] add original_e
|
x 4:94b33453f93b (draft) [ ] add original_d
|
o 3:6f9641995072 (draft) [ ] add n3w_3_c
|
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a


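The unstable() revset used above selects live changesets built on
rewritten history. A standalone sketch of that rule (toy names, not the
revset implementation):

    def unstable(live, obsolete, ancestors_of):
        # a changeset that is not obsolete itself but has an obsolete
        # ancestor is unstable: its base can vanish from the visible graph
        return set(n for n in live
                   if any(a in obsolete for a in ancestors_of(n)))
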
refuse to push obsolete changesets

$ hg push ../tmpc/ -r 'desc("original_d")'
pushing to ../tmpc/
searching for changes
abort: push includes obsolete changeset: 94b33453f93b!
[255]

refuse to push unstable changesets

$ hg push ../tmpc/
pushing to ../tmpc/
searching for changes
abort: push includes unstable changeset: cda648ca50f5!
[255]

Test that extinct changesets are properly detected

$ hg log -r 'extinct()'

Don't try to push extinct changesets

$ hg init ../tmpf
$ hg out ../tmpf
comparing with ../tmpf
searching for changes
0:1f0dee641bb7 (public) [ ] add a
1:7c3bad9141dc (public) [ ] add b
2:245bde4270cd (public) [ ] add original_c
3:6f9641995072 (draft) [ ] add n3w_3_c
4:94b33453f93b (draft) [ ] add original_d
5:cda648ca50f5 (draft) [tip ] add original_e
$ hg push ../tmpf -f # -f because we push unstable too
pushing to ../tmpf
searching for changes
adding changesets
adding manifests
adding file changes
added 6 changesets with 6 changes to 6 files (+1 heads)

no warning displayed

$ hg push ../tmpf
pushing to ../tmpf
searching for changes
no changes found
[1]

Do not warn about new head when the new head is a successor of a remote one

$ hg log -G
@ 5:cda648ca50f5 (draft) [tip ] add original_e
|
x 4:94b33453f93b (draft) [ ] add original_d
|
o 3:6f9641995072 (draft) [ ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a

$ hg up -q 'desc(n3w_3_c)'
$ mkcommit obsolete_e
created new head
$ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
$ hg outgoing ../tmpf # parasite hg outgoing testing
comparing with ../tmpf
searching for changes
6:3de5eca88c00 (draft) [tip ] add obsolete_e
$ hg push ../tmpf
pushing to ../tmpf
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)

test relevance computation
---------------------------------------

Checking simple case of "marker relevance".


Reminder of the repo situation

$ hg log --hidden --graph
@ 6:3de5eca88c00 (draft) [tip ] add obsolete_e
|
| x 5:cda648ca50f5 (draft) [ ] add original_e
| |
| x 4:94b33453f93b (draft) [ ] add original_d
|/
o 3:6f9641995072 (draft) [ ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a


List of all markers

$ hg debugobsolete
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)

List of changesets with no chain

$ hg debugobsolete --hidden --rev ::2

List of changesets that are included in a marker chain

$ hg debugobsolete --hidden --rev 6
cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)

List of changesets with a longer chain (including a pruned child)

$ hg debugobsolete --hidden --rev 3
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

List of both

$ hg debugobsolete --hidden --rev 3::6
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

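The three queries above suggest the relevance rule: start from the
requested changesets, keep every marker whose successors or recorded
parents mention a node of interest, then chase precursors transitively.
A toy version reproducing the outputs above (tuples instead of
Mercurial's marker objects; not the real relevantmarkers code):

    def relevant_markers(nodes, markers):
        # markers: iterable of (precursor, successors, parents) tuples
        relevant, pending = set(), set(nodes)
        while pending:
            node = pending.pop()
            for prec, succs, parents in markers:
                marker = (prec, succs, parents)
                if marker in relevant:
                    continue
                if (node == prec or node in succs
                        or (parents and node in parents)):
                    relevant.add(marker)
                    pending.add(prec)
        return relevant
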
#if serve

check hgweb does not explode
====================================

$ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
adding changesets
adding manifests
adding file changes
added 62 changesets with 63 changes to 9 files (+60 heads)
(run 'hg heads .' to see heads, 'hg merge' to merge)
$ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
> do
> hg debugobsolete $node
> done
$ hg up tip
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
$ cat hg.pid >> $DAEMON_PIDS

check changelog view

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'shortlog/'
200 Script output follows

check graph view

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'graph'
200 Script output follows

check filelog view

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'log/'`hg id --debug --id`/'babar'
200 Script output follows

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/68'
200 Script output follows
$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
404 Not Found
[1]

check the effect of the web.view config option:

635 $ "$TESTDIR/killdaemons.py" hg.pid
635 $ "$TESTDIR/killdaemons.py" hg.pid
636 $ cat >> .hg/hgrc << EOF
636 $ cat >> .hg/hgrc << EOF
637 > [web]
637 > [web]
638 > view=all
638 > view=all
639 > EOF
639 > EOF
640 $ wait
640 $ wait
641 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
641 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
642 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
642 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
643 200 Script output follows
643 200 Script output follows
644 $ "$TESTDIR/killdaemons.py" hg.pid
644 $ "$TESTDIR/killdaemons.py" hg.pid
645
645
Checking the _enable=False warning when obsolete markers exist

$ echo '[experimental]' >> $HGRCPATH
$ echo "evolution=" >> $HGRCPATH
$ hg log -r tip
obsolete feature not enabled but 68 markers found!
68:c15e9edfca13 (draft) [tip ] add celestine

re-enable the feature for later tests

$ echo '[experimental]' >> $HGRCPATH
$ echo "evolution=createmarkers,exchange" >> $HGRCPATH

#endif

Test incoming/outgoing with changesets obsoleted remotely, known locally
===============================================================================

This tests issue 3805

$ hg init repo-issue3805
$ cd repo-issue3805
$ echo "foo" > foo
$ hg ci -Am "A"
adding foo
$ hg clone . ../other-issue3805
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo "bar" >> foo
$ hg ci --amend
$ cd ../other-issue3805
$ hg log -G
@ 0:193e9254ce7e (draft) [tip ] A

$ hg log -G -R ../repo-issue3805
@ 2:3816541e5485 (draft) [tip ] A

$ hg incoming
comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
searching for changes
2:3816541e5485 (draft) [tip ] A
$ hg incoming --bundle ../issue3805.hg
comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
searching for changes
2:3816541e5485 (draft) [tip ] A
$ hg outgoing
comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
searching for changes
no changes found
[1]

#if serve

$ hg serve -R ../repo-issue3805 -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
$ cat hg.pid >> $DAEMON_PIDS

$ hg incoming http://localhost:$HGPORT
comparing with http://localhost:$HGPORT/
searching for changes
1:3816541e5485 (public) [tip ] A
$ hg outgoing http://localhost:$HGPORT
comparing with http://localhost:$HGPORT/
searching for changes
no changes found
[1]

$ "$TESTDIR/killdaemons.py" $DAEMON_PIDS

#endif

This tests issue 3814

(nothing to push but a locally hidden changeset)

$ cd ..
$ hg init repo-issue3814
$ cd repo-issue3805
$ hg push -r 3816541e5485 ../repo-issue3814
pushing to ../repo-issue3814
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
$ hg out ../repo-issue3814
comparing with ../repo-issue3814
searching for changes
no changes found
[1]

Test that a local tag blocks a changeset from being hidden

$ hg tag -l visible -r 0 --hidden
$ hg log -G
@ 2:3816541e5485 (draft) [tip ] A

x 0:193e9254ce7e (draft) [visible ] A

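The behavior above fits a simple model: obsolete changesets are hidden
unless something visible pins them, and a local tag is one such pin. A
sketch of that rule (toy names; "blockers" stands for working-directory
parents, bookmarks, and local tags, not Mercurial's actual code):

    def hidden(obsolete, blockers, ancestors_of):
        pinned = set()
        for b in blockers:
            pinned.add(b)
            pinned.update(ancestors_of(b))  # pins propagate to ancestors
        return obsolete - pinned
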
Test that removing a local tag does not cause some commands to fail

$ hg tag -l -r tip tiptag
$ hg tags
tiptag 2:3816541e5485
tip 2:3816541e5485
visible 0:193e9254ce7e
$ hg --config extensions.strip= strip -r tip --no-backup
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg tags
visible 0:193e9254ce7e
tip 0:193e9254ce7e
@@ -1,100 +1,100 b''
#require hardlink

$ echo "[extensions]" >> $HGRCPATH
$ echo "relink=" >> $HGRCPATH

$ fix_path() {
> tr '\\' /
> }

$ cat > arelinked.py <<EOF
> import sys, os
> from mercurial import util
> path1, path2 = sys.argv[1:3]
> if util.samefile(path1, path2):
>     print '%s == %s' % (path1, path2)
> else:
>     print '%s != %s' % (path1, path2)
> EOF


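util.samefile in the helper above boils down to comparing stat identity;
an equivalent standalone check looks like this (a sketch, not Mercurial's
code):

    import os

    def are_hardlinked(path1, path2):
        # two names refer to one file when device and inode both match
        s1, s2 = os.stat(path1), os.stat(path2)
        return (s1.st_dev, s1.st_ino) == (s2.st_dev, s2.st_ino)
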
create source repository

$ hg init repo
$ cd repo
$ echo a > a
$ echo b > b
$ hg ci -Am addfile
adding a
adding b
$ cat "$TESTDIR/binfile.bin" >> a
$ cat "$TESTDIR/binfile.bin" >> b
$ hg ci -Am changefiles

make another commit to create files larger than 1 KB to test
formatting of final byte count

$ cat "$TESTDIR/binfile.bin" >> a
$ cat "$TESTDIR/binfile.bin" >> b
$ hg ci -m anotherchange

don't sit forever trying to double-lock the source repo

$ hg relink .
relinking $TESTTMP/repo/.hg/store to $TESTTMP/repo/.hg/store (glob)
there is nothing to relink


Test files are read in binary mode

$ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\r\nb\n')"
$ cd ..


clone and pull to break links

$ hg clone --pull -r0 repo clone
adding changesets
adding manifests
adding file changes
added 1 changesets with 2 changes to 2 files
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd clone
$ hg pull -q
$ echo b >> b
$ hg ci -m changeb
created new head
$ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\nb\r\n')"


relink

$ hg relink --debug | fix_path
relinking $TESTTMP/repo/.hg/store to $TESTTMP/clone/.hg/store
tip has 2 files, estimated total number of files: 3
collecting: 00changelog.i 1/3 files (33.33%)
collecting: 00manifest.i 2/3 files (66.67%)
collecting: a.i 3/3 files (100.00%)
collecting: b.i 4/3 files (133.33%)
collecting: dummy.i 5/3 files (166.67%)
collected 5 candidate storage files
not linkable: 00changelog.i
not linkable: 00manifest.i
pruning: data/a.i 3/5 files (60.00%)
not linkable: data/b.i
pruning: data/dummy.i 5/5 files (100.00%)
pruned down to 2 probably relinkable files
relinking: data/a.i 1/2 files (50.00%)
not linkable: data/dummy.i
-relinked 1 files (1.37 KB reclaimed)
+relinked 1 files (1.36 KB reclaimed)
$ cd ..


check hardlinks

$ python arelinked.py repo/.hg/store/data/a.i clone/.hg/store/data/a.i
repo/.hg/store/data/a.i == clone/.hg/store/data/a.i
$ python arelinked.py repo/.hg/store/data/b.i clone/.hg/store/data/b.i
repo/.hg/store/data/b.i != clone/.hg/store/data/b.i

@@ -1,23 +1,23 b''
$ hg init repo
$ cd repo

$ touch foo
$ hg ci -Am 'add foo'
adding foo

$ hg up -C null
0 files updated, 0 files merged, 1 files removed, 0 files unresolved

this would be stored as a delta against rev 0, but the compressed delta
is bigger than the fulltext, so the fulltext is stored instead

$ echo foo bar baz > foo
$ hg ci -Am 'add foo again'
adding foo
created new head

$ hg debugindex foo
rev offset length ..... linkrev nodeid p1 p2 (re)
0 0 0 ..... 0 b80de5d13875 000000000000 000000000000 (re)
-1 0 24 ..... 1 0376abec49b8 000000000000 000000000000 (re)
+1 0 13 ..... 1 0376abec49b8 000000000000 000000000000 (re)

$ cd ..
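Both expected-output tweaks in this diff (1.37 KB -> 1.36 KB in the relink
test above, and length 24 -> 13 here) come from the same rule change named
in the commit message: store the fulltext when the compressed delta would
be bigger. A simplified sketch of that decision (the real revlog also
weighs delta-chain limits and NUL-prefixed data, so treat this as
illustrative only):

    import zlib

    def pack_entry(fulltext, delta):
        def stored(data):
            comp = zlib.compress(data)
            # keep compression only when it actually shrinks the data;
            # otherwise store raw with a one-byte 'u' marker
            return comp if len(comp) < len(data) else 'u' + data
        packed_full = stored(fulltext)
        packed_delta = stored(delta) if delta is not None else None
        if packed_delta is None or len(packed_delta) > len(packed_full):
            return packed_full
        return packed_delta

    # 'foo bar baz\n' is 12 bytes; raw plus the 'u' marker is 13, beating
    # the 24-byte delta -- hence the new debugindex length above.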