changegroup: emit full-replacement deltas if either revision is censored...
Mike Edgar
r24190:903c7e8c default
mercurial/changegroup.py
@@ -1,888 +1,898 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import weakref
from i18n import _
from node import nullrev, nullid, hex, short
import mdiff, util, dagutil
import struct, os, bz2, zlib, tempfile
import discovery, error, phases, branchmap

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"

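# Reviewer's note (not in the original file): both formats are fixed-width
# struct layouts of 20-byte binary nodes. Version 1 carries (node, p1, p2,
# linknode) and leaves the delta base implicit; version 2 appends an
# explicit deltabase field:
#
#   >>> import struct
#   >>> struct.calcsize("20s20s20s20s")     # cg1: node, p1, p2, linknode
#   80
#   >>> struct.calcsize("20s20s20s20s20s")  # cg2: ... plus deltabase
#   100
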
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

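# Reviewer's note (not in the original file): every chunk on the wire is
# framed by a big-endian 4-byte length that counts the header itself, and
# a zero length ("close chunk") terminates a chunk group:
#
#   >>> chunkheader(5) + "hello" + closechunk()
#   '\x00\x00\x00\thello\x00\x00\x00\x00'
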
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result

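# Reviewer's note (not in the original file): the inputs follow the
# addchangegroup return convention documented below (1 == heads unchanged,
# 1+n == n heads added, -1-n == n heads removed, 0 == failure), so:
#
#   >>> combineresults([1, 3, -2])  # +2 heads, then -1 head: net +1
#   2
#   >>> combineresults([1, 0, 3])   # any zero short-circuits to failure
#   0
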
class nocompress(object):
    def compress(self, x):
        return x
    def flush(self):
        return ""

bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG2Y": (), # special-cased below
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

def writebundle(ui, cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        if bundletype == "HG2Y":
            import bundle2
            bundle = bundle2.bundle20(ui)
            part = bundle.newpart('b2x:changegroup', data=cg.getchunks())
            part.addparam('version', cg.version)
            z = nocompress()
            chunkiter = bundle.getchunks()
        else:
            if cg.version != '01':
                raise util.Abort(_('old bundle types only support v1 '
                                   'changegroups'))
            header, compressor = bundletypes[bundletype]
            fh.write(header)
            z = compressor()
            chunkiter = cg.getchunks()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in chunkiter:
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

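# Usage sketch (reviewer's note; the call below is hypothetical): given a
# ui and a version '01' changegroup object cg, write it out bzip2-compressed:
#
#   >>> writebundle(ui, cg, 'changes.hg', 'HG10BZ')  # doctest: +SKIP
#   'changes.hg'
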
def decompressor(fh, alg):
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))

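# Reviewer's note (not in the original file): the 'BZ' branch primes the
# decompressor with the literal bytes "BZ" because that magic was already
# consumed from the stream when the "HG10BZ" bundle header was read; only
# the remainder of the bzip2 stream follows in the file chunks.
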
class cg1unpacker(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know the end of
        the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

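# Reviewer's note (not in the original file): in cg1 the delta base is
# implicit, as _deltaheader above shows: the first delta of a group is
# taken against p1, and every later delta against the node that preceded
# it in the stream. cg2 below removes the guesswork by carrying an
# explicit deltabase field in each delta header.
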
class cg2unpacker(cg1unpacker):
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case where firstparent is nullrev, the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)
        progress(msgbundling, None)

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev or reorder:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
            return clnode

        mfnodes = self.prune(mf, mfs, commonrevs, source)
        size = 0
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev and not reorder:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                return dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in
            # the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
-        if base == nullrev:
+        if revlog.iscensored(base) or revlog.iscensored(rev):
+            try:
+                delta = revlog.revision(node)
+            except error.CensoredNodeError, e:
+                delta = e.tombstone
+            if base == nullrev:
+                prefix = mdiff.trivialdiffheader(len(delta))
+            else:
+                baselen = revlog.rawsize(base)
+                prefix = mdiff.replacediffheader(baselen, len(delta))
+        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

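# Reviewer's note on the '-'/'+' lines above (the only change in this file;
# this annotation is not part of it): a censored revision stores only a
# tombstone, so a binary diff computed from it, or against it as a base,
# cannot be trusted. The new branch therefore sends the full text with a
# delta header that rewrites the entire base. In this patch series
# mdiff.trivialdiffheader(l) packs (start=0, end=0, length=l), i.e.
# "insert l bytes over an empty span", and mdiff.replacediffheader is the
# full-replacement variant; a sketch of that helper and its output:
#
#   >>> import struct
#   >>> def replacediffheader(oldlen, newlen):
#   ...     return struct.pack(">lll", 0, oldlen, newlen)
#   >>> replacediffheader(10, 3)  # drop all 10 base bytes, emit 3 new ones
#   '\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x03'
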
class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        if (revlog._generaldelta and reorder is not True):
            reorder = False
        return super(cg2packer, self).group(nodelist, revlog, lookup,
                                            units=units, reorder=reorder)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # avoid storing full revisions; pick prev in those cases
        # also pick prev when we can't be sure remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

packermap = {'01': (cg1packer, cg1unpacker),
             '02': (cg2packer, cg2unpacker)}

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return packermap[version][1](util.chunkbuffer(gengroup), 'UN')

def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = packermap[version][0](repo)
    return getsubset(repo, outgoing, bundler, source, version=version)

def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = packermap[version][0](repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = cg1packer(repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
                      version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    If version is None, use a version '01' changegroup.

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works. Returns a raw changegroup generator.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
                                  version=version)

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError, e:
            raise util.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

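# Reviewer's note (not part of the file): the CensoredBaseError handling
# above is, roughly, the receiving side of this change. A peer may send a
# delta whose base is censored locally only if it is a full replacement
# like the ones revchunk now emits; a delta that actually depends on the
# tombstoned base text surfaces as CensoredBaseError and is turned into a
# user-visible abort here.
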
695 def addchangegroup(repo, source, srctype, url, emptyok=False,
705 def addchangegroup(repo, source, srctype, url, emptyok=False,
696 targetphase=phases.draft):
706 targetphase=phases.draft):
697 """Add the changegroup returned by source.read() to this repo.
707 """Add the changegroup returned by source.read() to this repo.
698 srctype is a string like 'push', 'pull', or 'unbundle'. url is
708 srctype is a string like 'push', 'pull', or 'unbundle'. url is
699 the URL of the repo where this changegroup is coming from.
709 the URL of the repo where this changegroup is coming from.
700
710
701 Return an integer summarizing the change to this repo:
711 Return an integer summarizing the change to this repo:
702 - nothing changed or no source: 0
712 - nothing changed or no source: 0
703 - more heads than before: 1+added heads (2..n)
713 - more heads than before: 1+added heads (2..n)
704 - fewer heads than before: -1-removed heads (-2..-n)
714 - fewer heads than before: -1-removed heads (-2..-n)
705 - number of heads stays the same: 1
715 - number of heads stays the same: 1
706 """
716 """
707 repo = repo.unfiltered()
717 repo = repo.unfiltered()
708 def csmap(x):
718 def csmap(x):
709 repo.ui.debug("add changeset %s\n" % short(x))
719 repo.ui.debug("add changeset %s\n" % short(x))
710 return len(cl)
720 return len(cl)
711
721
712 def revmap(x):
722 def revmap(x):
713 return cl.rev(x)
723 return cl.rev(x)
714
724
715 if not source:
725 if not source:
716 return 0
726 return 0
717
727
718 changesets = files = revisions = 0
728 changesets = files = revisions = 0
719 efiles = set()
729 efiles = set()
720
730
721 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
731 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
722 # The transaction could have been created before and already carries source
732 # The transaction could have been created before and already carries source
723 # information. In this case we use the top level data. We overwrite the
733 # information. In this case we use the top level data. We overwrite the
724 # argument because we need to use the top level value (if they exist) in
734 # argument because we need to use the top level value (if they exist) in
725 # this function.
735 # this function.
726 srctype = tr.hookargs.setdefault('source', srctype)
736 srctype = tr.hookargs.setdefault('source', srctype)
727 url = tr.hookargs.setdefault('url', url)
737 url = tr.hookargs.setdefault('url', url)
728
738
729 # write changelog data to temp files so concurrent readers will not see
739 # write changelog data to temp files so concurrent readers will not see
730 # inconsistent view
740 # inconsistent view
731 cl = repo.changelog
741 cl = repo.changelog
732 cl.delayupdate(tr)
742 cl.delayupdate(tr)
733 oldheads = cl.heads()
743 oldheads = cl.heads()
734 try:
744 try:
735 repo.hook('prechangegroup', throw=True, **tr.hookargs)
745 repo.hook('prechangegroup', throw=True, **tr.hookargs)
736
746
737 trp = weakref.proxy(tr)
747 trp = weakref.proxy(tr)
738 # pull off the changeset group
748 # pull off the changeset group
739 repo.ui.status(_("adding changesets\n"))
749 repo.ui.status(_("adding changesets\n"))
740 clstart = len(cl)
750 clstart = len(cl)
741 class prog(object):
751 class prog(object):
742 step = _('changesets')
752 step = _('changesets')
743 count = 1
753 count = 1
744 ui = repo.ui
754 ui = repo.ui
745 total = None
755 total = None
746 def __call__(repo):
756 def __call__(repo):
747 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
757 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
748 total=repo.total)
758 total=repo.total)
749 repo.count += 1
759 repo.count += 1
750 pr = prog()
760 pr = prog()
751 source.callback = pr
761 source.callback = pr
752
762
753 source.changelogheader()
763 source.changelogheader()
754 srccontent = cl.addgroup(source, csmap, trp)
764 srccontent = cl.addgroup(source, csmap, trp)
755 if not (srccontent or emptyok):
765 if not (srccontent or emptyok):
756 raise util.Abort(_("received changelog group is empty"))
766 raise util.Abort(_("received changelog group is empty"))
757 clend = len(cl)
767 clend = len(cl)
758 changesets = clend - clstart
768 changesets = clend - clstart
759 for c in xrange(clstart, clend):
769 for c in xrange(clstart, clend):
760 efiles.update(repo[c].files())
770 efiles.update(repo[c].files())
761 efiles = len(efiles)
771 efiles = len(efiles)
762 repo.ui.progress(_('changesets'), None)
772 repo.ui.progress(_('changesets'), None)
763
773
764 # pull off the manifest group
774 # pull off the manifest group
765 repo.ui.status(_("adding manifests\n"))
775 repo.ui.status(_("adding manifests\n"))
766 pr.step = _('manifests')
776 pr.step = _('manifests')
767 pr.count = 1
777 pr.count = 1
768 pr.total = changesets # manifests <= changesets
778 pr.total = changesets # manifests <= changesets
769 # no need to check for empty manifest group here:
779 # no need to check for empty manifest group here:
770 # if the result of the merge of 1 and 2 is the same in 3 and 4,
780 # if the result of the merge of 1 and 2 is the same in 3 and 4,
771 # no new manifest will be created and the manifest group will
781 # no new manifest will be created and the manifest group will
772 # be empty during the pull
782 # be empty during the pull
773 source.manifestheader()
783 source.manifestheader()
774 repo.manifest.addgroup(source, revmap, trp)
784 repo.manifest.addgroup(source, revmap, trp)
775 repo.ui.progress(_('manifests'), None)
785 repo.ui.progress(_('manifests'), None)
776
786
777 needfiles = {}
787 needfiles = {}
778 if repo.ui.configbool('server', 'validate', default=False):
788 if repo.ui.configbool('server', 'validate', default=False):
779 # validate incoming csets have their manifests
789 # validate incoming csets have their manifests
780 for cset in xrange(clstart, clend):
790 for cset in xrange(clstart, clend):
781 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
791 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
782 mfest = repo.manifest.readdelta(mfest)
792 mfest = repo.manifest.readdelta(mfest)
783 # store file nodes we must see
793 # store file nodes we must see
784 for f, n in mfest.iteritems():
794 for f, n in mfest.iteritems():
785 needfiles.setdefault(f, set()).add(n)
795 needfiles.setdefault(f, set()).add(n)
786
796
787 # process the files
797 # process the files
788 repo.ui.status(_("adding file changes\n"))
798 repo.ui.status(_("adding file changes\n"))
789 pr.step = _('files')
799 pr.step = _('files')
790 pr.count = 1
800 pr.count = 1
791 pr.total = efiles
801 pr.total = efiles
792 source.callback = None
802 source.callback = None
793
803
794 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
804 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
795 needfiles)
805 needfiles)
796 revisions += newrevs
806 revisions += newrevs
797 files += newfiles
807 files += newfiles
798
808
799 dh = 0
809 dh = 0
800 if oldheads:
810 if oldheads:
801 heads = cl.heads()
811 heads = cl.heads()
802 dh = len(heads) - len(oldheads)
812 dh = len(heads) - len(oldheads)
803 for h in heads:
813 for h in heads:
804 if h not in oldheads and repo[h].closesbranch():
814 if h not in oldheads and repo[h].closesbranch():
805 dh -= 1
815 dh -= 1
806 htext = ""
816 htext = ""
807 if dh:
817 if dh:
808 htext = _(" (%+d heads)") % dh
818 htext = _(" (%+d heads)") % dh
809
819
810 repo.ui.status(_("added %d changesets"
820 repo.ui.status(_("added %d changesets"
811 " with %d changes to %d files%s\n")
821 " with %d changes to %d files%s\n")
812 % (changesets, revisions, files, htext))
                         % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: tr.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers cannot push the boundary themselves.
            # New servers won't push the boundary if the changeset already
            # exists locally as secret.
            #
            # We should not use `added` here but the list of all changes in
            # the bundle.
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefore
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alters behavior during push
            #
            # strip should not touch the boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        if changesets > 0:
            if srctype != 'strip':
                # During strip, the branchcache is invalid but the coming
                # call to `destroyed` will repair it.
                # In other cases we can safely update the cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", **hookargs)

                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))

            tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                            lambda tr: repo._afterlock(runhooks))

        tr.close()

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
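For readers following the hook wiring above: the 'node' entry added to hookargs is the hex id of the first incoming changeset, and it is what an in-process pretxnchangegroup hook receives. A minimal sketch of such a hook (the hook and module names here are hypothetical, not part of this change):

    # myhooks.py - enabled in hgrc via:
    #   [hooks]
    #   pretxnchangegroup.check = python:myhooks.checkfirstnode
    def checkfirstnode(ui, repo, node=None, **kwargs):
        # 'node' is the first changeset added by the changegroup; every
        # revision from repo[node] onward is part of the incoming group.
        first = repo[node]
        ui.status('changegroup starts at %s\n' % first.hex()[:12])
        # returning a true value makes a pretxn* hook fail and rolls
        # back the transaction; a falsy return accepts the group
        return False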
@@ -1,153 +1,157 b''
 # error.py - Mercurial exceptions
 #
 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 """Mercurial exceptions.

 This allows us to catch exceptions at higher levels without forcing
 imports.
 """

 # Do not import anything here, please

 class RevlogError(Exception):
     pass

 class FilteredIndexError(IndexError):
     pass

 class LookupError(RevlogError, KeyError):
     def __init__(self, name, index, message):
         self.name = name
         self.index = index
         # this can't be called 'message' because at least some installs of
         # Python 2.6+ complain about the 'message' property being deprecated
         self.lookupmessage = message
         if isinstance(name, str) and len(name) == 20:
             from node import short
             name = short(name)
         RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))

     def __str__(self):
         return RevlogError.__str__(self)

 class FilteredLookupError(LookupError):
     pass

 class ManifestLookupError(LookupError):
     pass

 class CommandError(Exception):
     """Exception raised on errors in parsing the command line."""

 class InterventionRequired(Exception):
     """Exception raised when a command requires human intervention."""

 class Abort(Exception):
     """Raised if a command needs to print an error and exit."""
     def __init__(self, *args, **kw):
         Exception.__init__(self, *args)
         self.hint = kw.get('hint')

 class HookAbort(Abort):
     """raised when a validation hook fails, aborting an operation

     Exists to allow more specialized catching."""
     pass

 class ConfigError(Abort):
     """Exception raised when parsing config files"""

 class OutOfBandError(Exception):
     """Exception raised when a remote repo reports failure"""

 class ParseError(Exception):
     """Raised when parsing config files and {rev,file}sets (msg[, pos])"""

 class RepoError(Exception):
     def __init__(self, *args, **kw):
         Exception.__init__(self, *args)
         self.hint = kw.get('hint')

 class RepoLookupError(RepoError):
     pass

 class FilteredRepoLookupError(RepoLookupError):
     pass

 class CapabilityError(RepoError):
     pass

 class RequirementError(RepoError):
     """Exception raised if .hg/requires has an unknown entry."""
     pass

 class LockError(IOError):
     def __init__(self, errno, strerror, filename, desc):
         IOError.__init__(self, errno, strerror, filename)
         self.desc = desc

 class LockHeld(LockError):
     def __init__(self, errno, filename, desc, locker):
         LockError.__init__(self, errno, 'Lock held', filename, desc)
         self.locker = locker

 class LockUnavailable(LockError):
     pass

 class ResponseError(Exception):
     """Raised to print an error with part of output and exit."""

 class UnknownCommand(Exception):
     """Exception raised if command is not in the command table."""

 class AmbiguousCommand(Exception):
     """Exception raised if command shortcut matches more than one command."""

 # derived from KeyboardInterrupt to simplify some breakout code
 class SignalInterrupt(KeyboardInterrupt):
     """Exception raised on SIGTERM and SIGHUP."""

 class SignatureError(Exception):
     pass

 class PushRaced(RuntimeError):
     """An exception raised during unbundling that indicates a push race"""

 # bundle2 related errors
 class BundleValueError(ValueError):
     """error raised when bundle2 cannot be processed"""

 class UnsupportedPartError(BundleValueError):
     def __init__(self, parttype=None, params=()):
         self.parttype = parttype
         self.params = params
         if self.parttype is None:
             msg = 'Stream Parameter'
         else:
             msg = parttype
         if self.params:
             msg = '%s - %s' % (msg, ', '.join(self.params))
         ValueError.__init__(self, msg)

 class ReadOnlyPartError(RuntimeError):
     """error raised when code tries to alter a part being generated"""
     pass

 class CensoredNodeError(RevlogError):
-    """error raised when content verification fails on a censored node"""
-
-    def __init__(self, filename, node):
+    """error raised when content verification fails on a censored node
+
+    Also contains the tombstone data substituted for the uncensored data.
+    """
+
+    def __init__(self, filename, node, tombstone):
         from node import short
         RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
+        self.tombstone = tombstone

 class CensoredBaseError(RevlogError):
     """error raised when a delta is rejected because its base is censored

     A delta based on a censored revision must be formed as a single patch
     operation which replaces the entire base with new content. This ensures
     the delta may be applied by clones which have not censored the base.
     """
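The CensoredBaseError docstring above pins down the exchange-format requirement behind this change: a delta whose base is censored must replace the base wholesale. As a rough illustration of what such a delta looks like on the wire, assuming the revlog/bdiff patch encoding of three big-endian int32s (start, end, new length) followed by the new data; the helper names are ours, not Mercurial API:

    import struct

    def fullreplacedelta(baselen, newtext):
        # a single patch op covering the whole base: replace bytes
        # [0, baselen) with newtext
        return struct.pack(">lll", 0, baselen, len(newtext)) + newtext

    def isfullreplace(delta, baselen):
        # true if the delta is exactly one op spanning the whole base
        if len(delta) < 12:
            return False
        start, end, length = struct.unpack(">lll", delta[:12])
        return (start, end) == (0, baselen) and len(delta) == 12 + length

A clone that still holds the uncensored base applies this delta just as well as one that does not, which is the property the docstring calls out.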
@@ -1,109 +1,109 b''
 # filelog.py - file history class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import error, revlog
 import re

 _mdre = re.compile('\1\n')
 def parsemeta(text):
     """return (metadatadict, metadatasize)"""
     # text can be buffer, so we can't use .startswith or .index
     if text[:2] != '\1\n':
         return None, None
     s = _mdre.search(text, 2).start()
     mtext = text[2:s]
     meta = {}
     for l in mtext.splitlines():
         k, v = l.split(": ", 1)
         meta[k] = v
     return meta, (s + 2)

 def packmeta(meta, text):
     keys = sorted(meta.iterkeys())
     metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
     return "\1\n%s\1\n%s" % (metatext, text)

 def _censoredtext(text):
     m, offs = parsemeta(text)
     return m and "censored" in m

 class filelog(revlog.revlog):
     def __init__(self, opener, path):
         super(filelog, self).__init__(opener,
                                       "/".join(("data", path + ".i")))

     def read(self, node):
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return t
         s = t.index('\1\n', 2)
         return t[s + 2:]

     def add(self, text, meta, transaction, link, p1=None, p2=None):
         if meta or text.startswith('\1\n'):
             text = packmeta(meta, text)
         return self.addrevision(text, transaction, link, p1, p2)

     def renamed(self, node):
         if self.parents(node)[0] != revlog.nullid:
             return False
         t = self.revision(node)
         m = parsemeta(t)[0]
         if m and "copy" in m:
             return (m["copy"], revlog.bin(m["copyrev"]))
         return False

     def size(self, rev):
         """return the size of a given revision"""

         # for revisions with renames, we have to go the slow way
         node = self.node(rev)
         if self.renamed(node):
             return len(self.read(node))
         if self.iscensored(rev):
             return 0

         # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
         return super(filelog, self).size(rev)

     def cmp(self, node, text):
         """compare text with a given file revision

         returns True if text is different from what is stored.
         """

         t = text
         if text.startswith('\1\n'):
             t = '\1\n\1\n' + text

         samehashes = not super(filelog, self).cmp(node, t)
         if samehashes:
             return False

         # censored files compare against the empty file
         if self.iscensored(self.rev(node)):
             return text != ''

         # renaming a file produces a different hash, even if the data
         # remains unchanged. Check if it's the case (slow):
         if self.renamed(node):
             t2 = self.read(node)
             return t2 != text

         return True

     def checkhash(self, text, p1, p2, node, rev=None):
         try:
             super(filelog, self).checkhash(text, p1, p2, node, rev=rev)
         except error.RevlogError:
             if _censoredtext(text):
-                raise error.CensoredNodeError(self.indexfile, node)
+                raise error.CensoredNodeError(self.indexfile, node, text)
             raise

     def iscensored(self, rev):
         """Check if a file revision is censored."""
         return self.flags(rev) & revlog.REVIDX_ISCENSORED
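Taken together with the error.py change, checkhash now lets callers recover the tombstone when a censored revision fails verification. A sketch of how calling code might use it (the helper below is illustrative, not filelog API; it assumes revision() surfaces the checkhash failure as shown above):

    from mercurial import error, filelog

    def censorshipnote(flog, node):
        """Return the tombstone metadata dict for a censored file node,
        or None if the revision verifies cleanly."""
        try:
            flog.revision(node)
            return None
        except error.CensoredNodeError, inst:
            # inst.tombstone carries the metadata-wrapped replacement
            # text; parsemeta() recovers the "censored" key from it
            meta, offset = filelog.parsemeta(inst.tombstone)
            return meta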