bundle-ng: move bundle generation to changegroup.py
Benoit Boissinot
r19204:e9c5b1c2 default

diff --git a/mercurial/changegroup.py b/mercurial/changegroup.py
@@ -1,354 +1,417 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import nullrev, hex
import mdiff, util, dagutil
import struct, os, bz2, zlib, tempfile

_BUNDLE10_DELTA_HEADER = "20s20s20s20s"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
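
The three helpers above define the entire framing of a changegroup stream: every chunk is a 4-byte big-endian length (payload length plus the 4 header bytes) followed by the payload, and a bare zero length closes a group. A minimal round-trip sketch, using only the functions above and Python 2's StringIO; this is an illustration, not part of the change under review:

    from StringIO import StringIO

    payload = 'hello'
    wire = chunkheader(len(payload)) + payload + closechunk()
    stream = StringIO(wire)
    assert getchunk(stream) == 'hello'  # payload recovered, header stripped
    assert getchunk(stream) == ''       # zero-length chunk ends the group
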
class nocompress(object):
    def compress(self, x):
        return x
    def flush(self):
        return ""

bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
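
Note the asymmetry in the table above: the 'HG10BZ' bundle type writes only 'HG10' as its file header, because a bz2 stream itself begins with the bytes 'BZ', which readbundle() below then reads back as the compression field. A sketch of how the 6-byte header splits, assuming a bzip2 bundle:

    # Illustration only: how readbundle() (below) decomposes the header.
    header = 'HG10BZ'
    magic, version, alg = header[0:2], header[2:4], header[4:6]
    assert (magic, version, alg) == ('HG', '10', 'BZ')
    # decompressor() primes bz2 with zd.decompress("BZ") because those two
    # bytes were already consumed as part of the 6-byte header.
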
def writebundle(cg, filename, bundletype):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(cg)
                if not chunk:
                    break
                empty = False
                fh.write(z.compress(chunkheader(len(chunk))))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    fh.write(z.compress(chunk[pos:next]))
                    pos = next
            fh.write(z.compress(closechunk()))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
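
The parsing loop above insists on seeing at least two chunk groups (changelog and manifest) before an empty group counts as the terminator, so the smallest stream it accepts is three zero-length chunks. A minimal sketch, assuming only the helpers defined in this module:

    # Illustration only: an "empty" changegroup is three zero-length chunks
    # (empty changelog group, empty manifest group, end-of-filelogs marker).
    from StringIO import StringIO

    fname = writebundle(StringIO(closechunk() * 3), None, 'HG10UN')
    # fname now names a temporary hg-bundle-*.hg file containing 'HG10UN'
    # followed by the same three zero-length chunks.
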
def decompressor(fh, alg):
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))

class unbundle10(object):
    deltaheader = _BUNDLE10_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return dict(filename=fname)

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return dict(node=node, p1=p1, p2=p2, cs=cs,
                    deltabase=deltabase, delta=delta)
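
A note on the numbers behind deltaheadersize: the HG10 delta header is four 20-byte binary node ids, and the delta base never appears on the wire; _deltaheader() above reconstructs it as p1 for the first chunk of a group and as the previous chunk's node after that.

    # Illustration only.
    import struct
    assert struct.calcsize(_BUNDLE10_DELTA_HEADER) == 80  # node, p1, p2, cs
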
class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

def readbundle(fh, fname):
    header = readexactly(fh, 6)

    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        fh = headerlessfixup(fh, header)
        header = "HG10UN"

    magic, version, alg = header[0:2], header[2:4], header[4:6]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version != '10':
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
    return unbundle10(fh, alg)
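
A usage sketch for readbundle(), reusing the minimal empty changegroup from the writebundle example above; all names come from this module:

    # Illustration only.
    from StringIO import StringIO

    cg = readbundle(StringIO('HG10UN' + closechunk() * 3), 'example')
    assert not cg.compressed()    # 'UN' means no decompressor is wrapped
    assert cg.chunklength() == 0  # first group (the changelog) is empty
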
class bundle10(object):
    deltaheader = _BUNDLE10_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self.count = [0, 0]
    def start(self, lookup):
        self._lookup = lookup
    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.
        """

        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in self.revchunk(revlog, curr, prev):
                yield c

        yield self.close()
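
A worked example of the chain built by group(), with assumed revision numbers: if nodelist maps to revs [5, 6, 9] and the first parent of rev 5 is rev 3, the loop pairs consecutive entries of [3, 5, 6, 9] and emits deltas 3->5, 5->6 and 6->9, then a closing chunk. The receiver either already has rev 3, since it has all history before these changesets, or rev 3 is nullrev and revchunk() below emits a full snapshot instead.

    # Illustration only (assumed rev numbers).
    revs = [3, 5, 6, 9]  # parent 3 prepended by revs.insert(0, p)
    pairs = zip(revs, revs[1:])
    assert pairs == [(3, 5), (5, 6), (6, 9)]  # one revchunk() per pair
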
-    def generate(self, clnodes, getmfnodes, getfiles, getfilenodes, source):
+    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = repo.ui.progress
        count = self.count
        _bundling = _('bundling')
+        _changesets = _('changesets')
+        _manifests = _('manifests')
+        _files = _('files')
+
+        mfs = {} # needed manifests
+        fnodes = {} # needed file nodes
+        changedfiles = set()
+        fstate = ['', {}]
+
+        # filter any nodes that claim to be part of the known set
+        def prune(revlog, missing):
+            rr, rl = revlog.rev, revlog.linkrev
+            return [n for n in missing
+                    if rl(rr(n)) not in commonrevs]
+
+        def lookup(revlog, x):
+            if revlog == cl:
+                c = cl.read(x)
+                changedfiles.update(c[3])
+                mfs.setdefault(c[0], x)
+                count[0] += 1
+                progress(_bundling, count[0],
+                         unit=_changesets, total=count[1])
+                return x
+            elif revlog == mf:
+                clnode = mfs[x]
+                if not fastpathlinkrev:
+                    mdata = mf.readfast(x)
+                    for f, n in mdata.iteritems():
+                        if f in changedfiles:
+                            fnodes[f].setdefault(n, clnode)
+                count[0] += 1
+                progress(_bundling, count[0],
+                         unit=_manifests, total=count[1])
+                return clnode
+            else:
+                progress(_bundling, count[0], item=fstate[0],
+                         unit=_files, total=count[1])
+                return fstate[1][x]
+
+        self.start(lookup)
+
+        def getmfnodes():
+            for f in changedfiles:
+                fnodes[f] = {}
+            count[:] = [0, len(mfs)]
+            return prune(mf, mfs)
+        def getfiles():
+            mfs.clear()
+            return changedfiles
+        def getfilenodes(fname, filerevlog):
+            if fastpathlinkrev:
+                ln, llr = filerevlog.node, filerevlog.linkrev
+                def genfilenodes():
+                    for r in filerevlog:
+                        linkrev = llr(r)
+                        if linkrev not in commonrevs:
+                            yield filerevlog.node(r), cl.node(linkrev)
+                fnodes[fname] = dict(genfilenodes())
+            fstate[0] = fname
+            fstate[1] = fnodes.pop(fname, {})
+            return prune(filerevlog, fstate[1])
+

        count[:] = [0, len(clnodes)]
        for chunk in self.group(clnodes, cl, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        for chunk in self.group(getmfnodes(), mf, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        changedfiles = getfiles()
        count[:] = [0, len(changedfiles)]
        for fname in sorted(changedfiles):
            filerevlog = repo.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s")
                                 % fname)
            nodelist = getfilenodes(fname, filerevlog)
            if nodelist:
                count[0] += 1
                yield self.fileheader(fname)
                for chunk in self.group(nodelist, filerevlog, reorder):
                    yield chunk
        yield self.close()
        progress(_bundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def revchunk(self, revlog, rev, prev):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        linknode = self._lookup(revlog, node)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
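
Putting revchunk() and builddeltaheader() together, one chunk emitted for a revision is laid out as follows; this is a sketch of the HG10 wire layout inferred from the code above:

    # chunkheader(l)            4 bytes, big-endian, length includes these 4
    # node, p1n, p2n, linknode  4 x 20 bytes, from builddeltaheader()
    # mdiff.trivialdiffheader   present only when the base is nullrev; it
    #                           turns the full text into a patch against ''
    # delta                     the diff (or full-text) payload

The delta base itself is never transmitted; HG10 receivers reconstruct it, as shown in unbundle10._deltaheader() earlier in this file.
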

diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
@@ -1,2503 +1,2433 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
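
A usage sketch for the decorator above; the class and method names here are hypothetical and for illustration only:

    class somerepo(object):           # hypothetical class
        @unfilteredmethod
        def destroyed(self):          # hypothetical method
            pass                      # body always runs on repo.unfiltered()
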
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        return self._repo.getbundle(source, heads=heads, common=common,
                                    bundlecaps=None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
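
A sketch of how the two methods above interact (assumed usage; localpeer.__init__ earlier in this file relies on the 'served' filter):

    # Illustration only.
    # view = repo.filtered('served')  # proxycls: repoview mixed into the
    #                                 # repo's own class
    # view.unfiltered()               # repoview overrides unfiltered() to
    #                                 # hand back the original repository
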
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)
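
Taken together, the special methods above give the repository a container-like API; a usage sketch, where repo is an assumed localrepository instance:

    # Illustration only.
    # repo[None]     -> workingctx for the working directory
    # repo['tip']    -> changectx for the tip changeset
    # 'tip' in repo  -> True if the symbol resolves via repo.lookup()
    # len(repo)      -> number of changesets in the changelog
    # iter(repo)     -> iterates over revision numbers
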
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
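
A usage sketch for the two revset helpers above (repo is an assumed instance; the %s substitution comes from revset.formatspec):

    # Illustration only.
    # repo.revs('branch(%s) and not public()', 'default')  # -> [rev, ...]
    # [ctx.hex() for ctx in repo.set('head()')]            # -> hex node ids
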
419 def url(self):
419 def url(self):
420 return 'file:' + self.root
420 return 'file:' + self.root
421
421
422 def hook(self, name, throw=False, **args):
422 def hook(self, name, throw=False, **args):
423 return hook.hook(self.ui, self, name, throw, **args)
423 return hook.hook(self.ui, self, name, throw, **args)
424
424
425 @unfilteredmethod
425 @unfilteredmethod
426 def _tag(self, names, node, message, local, user, date, extra={}):
426 def _tag(self, names, node, message, local, user, date, extra={}):
427 if isinstance(names, str):
427 if isinstance(names, str):
428 names = (names,)
428 names = (names,)
429
429
430 branches = self.branchmap()
430 branches = self.branchmap()
431 for name in names:
431 for name in names:
432 self.hook('pretag', throw=True, node=hex(node), tag=name,
432 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 local=local)
433 local=local)
434 if name in branches:
434 if name in branches:
435 self.ui.warn(_("warning: tag %s conflicts with existing"
435 self.ui.warn(_("warning: tag %s conflicts with existing"
436 " branch name\n") % name)
436 " branch name\n") % name)
437
437
438 def writetags(fp, names, munge, prevtags):
438 def writetags(fp, names, munge, prevtags):
439 fp.seek(0, 2)
439 fp.seek(0, 2)
440 if prevtags and prevtags[-1] != '\n':
440 if prevtags and prevtags[-1] != '\n':
441 fp.write('\n')
441 fp.write('\n')
442 for name in names:
442 for name in names:
443 m = munge and munge(name) or name
443 m = munge and munge(name) or name
444 if (self._tagscache.tagtypes and
444 if (self._tagscache.tagtypes and
445 name in self._tagscache.tagtypes):
445 name in self._tagscache.tagtypes):
446 old = self.tags().get(name, nullid)
446 old = self.tags().get(name, nullid)
447 fp.write('%s %s\n' % (hex(old), m))
447 fp.write('%s %s\n' % (hex(old), m))
448 fp.write('%s %s\n' % (hex(node), m))
448 fp.write('%s %s\n' % (hex(node), m))
449 fp.close()
449 fp.close()
450
450
451 prevtags = ''
451 prevtags = ''
452 if local:
452 if local:
453 try:
453 try:
454 fp = self.opener('localtags', 'r+')
454 fp = self.opener('localtags', 'r+')
455 except IOError:
455 except IOError:
456 fp = self.opener('localtags', 'a')
456 fp = self.opener('localtags', 'a')
457 else:
457 else:
458 prevtags = fp.read()
458 prevtags = fp.read()
459
459
460 # local tags are stored in the current charset
460 # local tags are stored in the current charset
461 writetags(fp, names, None, prevtags)
461 writetags(fp, names, None, prevtags)
462 for name in names:
462 for name in names:
463 self.hook('tag', node=hex(node), tag=name, local=local)
463 self.hook('tag', node=hex(node), tag=name, local=local)
464 return
464 return
465
465
466 try:
466 try:
467 fp = self.wfile('.hgtags', 'rb+')
467 fp = self.wfile('.hgtags', 'rb+')
468 except IOError, e:
468 except IOError, e:
469 if e.errno != errno.ENOENT:
469 if e.errno != errno.ENOENT:
470 raise
470 raise
471 fp = self.wfile('.hgtags', 'ab')
471 fp = self.wfile('.hgtags', 'ab')
472 else:
472 else:
473 prevtags = fp.read()
473 prevtags = fp.read()
474
474
475 # committed tags are stored in UTF-8
475 # committed tags are stored in UTF-8
476 writetags(fp, names, encoding.fromlocal, prevtags)
476 writetags(fp, names, encoding.fromlocal, prevtags)
477
477
478 fp.close()
478 fp.close()
479
479
480 self.invalidatecaches()
480 self.invalidatecaches()
481
481
482 if '.hgtags' not in self.dirstate:
482 if '.hgtags' not in self.dirstate:
483 self[None].add(['.hgtags'])
483 self[None].add(['.hgtags'])
484
484
485 m = matchmod.exact(self.root, '', ['.hgtags'])
485 m = matchmod.exact(self.root, '', ['.hgtags'])
486 tagnode = self.commit(message, user, date, extra=extra, match=m)
486 tagnode = self.commit(message, user, date, extra=extra, match=m)
487
487
488 for name in names:
488 for name in names:
489 self.hook('tag', node=hex(node), tag=name, local=local)
489 self.hook('tag', node=hex(node), tag=name, local=local)
490
490
491 return tagnode
491 return tagnode
492
492
493 def tag(self, names, node, message, local, user, date):
493 def tag(self, names, node, message, local, user, date):
494 '''tag a revision with one or more symbolic names.
494 '''tag a revision with one or more symbolic names.
495
495
496 names is a list of strings or, when adding a single tag, names may be a
496 names is a list of strings or, when adding a single tag, names may be a
497 string.
497 string.
498
498
499 if local is True, the tags are stored in a per-repository file.
499 if local is True, the tags are stored in a per-repository file.
500 otherwise, they are stored in the .hgtags file, and a new
500 otherwise, they are stored in the .hgtags file, and a new
501 changeset is committed with the change.
501 changeset is committed with the change.
502
502
503 keyword arguments:
503 keyword arguments:
504
504
505 local: whether to store tags in non-version-controlled file
505 local: whether to store tags in non-version-controlled file
506 (default False)
506 (default False)
507
507
508 message: commit message to use if committing
508 message: commit message to use if committing
509
509
510 user: name of user to use if committing
510 user: name of user to use if committing
511
511
512 date: date tuple to use if committing'''
512 date: date tuple to use if committing'''
513
513
514 if not local:
514 if not local:
515 for x in self.status()[:5]:
515 for x in self.status()[:5]:
516 if '.hgtags' in x:
516 if '.hgtags' in x:
517 raise util.Abort(_('working copy of .hgtags is changed '
517 raise util.Abort(_('working copy of .hgtags is changed '
518 '(please commit .hgtags manually)'))
518 '(please commit .hgtags manually)'))
519
519
520 self.tags() # instantiate the cache
520 self.tags() # instantiate the cache
521 self._tag(names, node, message, local, user, date)
521 self._tag(names, node, message, local, user, date)
522
522
523 @filteredpropertycache
523 @filteredpropertycache
524 def _tagscache(self):
524 def _tagscache(self):
525 '''Returns a tagscache object that contains various tags related
525 '''Returns a tagscache object that contains various tags related
526 caches.'''
526 caches.'''
527
527
528 # This simplifies its cache management by having one decorated
528 # This simplifies its cache management by having one decorated
529 # function (this one) and the rest simply fetch things from it.
529 # function (this one) and the rest simply fetch things from it.
530 class tagscache(object):
530 class tagscache(object):
531 def __init__(self):
531 def __init__(self):
532 # These two define the set of tags for this repository. tags
532 # These two define the set of tags for this repository. tags
533 # maps tag name to node; tagtypes maps tag name to 'global' or
533 # maps tag name to node; tagtypes maps tag name to 'global' or
534 # 'local'. (Global tags are defined by .hgtags across all
534 # 'local'. (Global tags are defined by .hgtags across all
535 # heads, and local tags are defined in .hg/localtags.)
535 # heads, and local tags are defined in .hg/localtags.)
536 # They constitute the in-memory cache of tags.
536 # They constitute the in-memory cache of tags.
537 self.tags = self.tagtypes = None
537 self.tags = self.tagtypes = None
538
538
539 self.nodetagscache = self.tagslist = None
539 self.nodetagscache = self.tagslist = None
540
540
541 cache = tagscache()
541 cache = tagscache()
542 cache.tags, cache.tagtypes = self._findtags()
542 cache.tags, cache.tagtypes = self._findtags()
543
543
544 return cache
544 return cache
545
545
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}  # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

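    # Illustrative sketch (not part of the original file): consuming the
    # read APIs defined above. hex() is the node-to-hex helper already
    # used elsewhere in this file; the 'unknown' fallback covers names
    # (such as 'tip') that carry no recorded tag type.
    def _exampledumptags(repo):
        for t, n in repo.tagslist():                # ordered by revision
            kind = repo.tagtype(t) or 'unknown'     # 'global' or 'local'
            repo.ui.write("%s %s %s\n" % (hex(n), kind, t))
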
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        '''return a sorted list of bookmarks pointing to the given node'''
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

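    # Illustrative sketch (not part of the original file): the two reverse
    # lookups above both answer "which names point at this node?" -- tags
    # via the lazily built nodetagscache, bookmarks via a direct scan.
    def _examplenames(repo, node):
        return repo.nodetags(node) + repo.nodebookmarks(node)
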
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def _branchtip(self, heads):
        '''return the tipmost open branch head in heads; if all heads are
        closed, fall back to the tipmost head'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch; open heads are preferred over closed ones'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

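    # Illustrative sketch (not part of the original file): summarizing the
    # branch information computed above, one line per branch with its
    # head count and tip node.
    def _exampledumpbranches(repo):
        for branch, heads in sorted(repo.branchmap().iteritems()):
            tip = repo.branchtip(branch)
            repo.ui.write("%s: %d head(s), tip %s\n"
                          % (branch, len(heads), hex(tip)))
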
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        '''return a list of booleans, one per node: True if the node is
        known locally and is not secret'''
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

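    # Illustrative sketch (not part of the original file): known() is a
    # building block of set discovery -- given candidate nodes (e.g. a
    # peer's heads), keep only those present locally and not secret.
    def _examplecommon(repo, candidates):
        flags = repo.known(candidates)
        return [n for n, ok in zip(candidates, flags) if ok]
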
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as
            # it requires access to the parents' manifests. Preserve
            # copies only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        '''register a data filter; the name is matched as a prefix of
        commands configured in the [encode]/[decode] sections (see
        _loadfilter above)'''
        self._datafilters[name] = filter

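    # Illustrative sketch (not part of the original file): registering a
    # made-up data filter. Per _loadfilter() above, the registered name is
    # matched as a prefix of the command configured in [encode]/[decode],
    # and _filter() calls it with ui/repo/filename keyword arguments.
    def _exampleregisterfilter(repo):
        def crlftolf(data, params, ui=None, repo=None, filename=None):
            # hypothetical filter body: normalize line endings
            return data.replace('\r\n', '\n')
        repo.adddatafilter('crlftolf:', crlftolf)
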
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

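    # Illustrative sketch (not part of the original file): wread()/wwrite()
    # bracket working-directory I/O with the encode and decode filters, so
    # repository data stays canonical while the checkout follows local
    # conventions. A filtered copy of a regular file is then simply:
    def _examplefilteredcopy(repo, src, dst):
        data = repo.wread(src)      # working dir -> repository form
        repo.wwrite(dst, data, '')  # repository form -> working dir
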
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

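    # Illustrative sketch (not part of the original file): the transaction
    # protocol used by writers in this class (compare commitctx() below).
    # close() commits; release() in the finally block rolls back via the
    # journal written above if close() was never reached.
    def _exampletransaction(repo):
        lock = repo.lock()
        try:
            tr = repo.transaction('example')
            try:
                # ... append to revlogs through tr ...
                tr.close()
            finally:
                tr.release()
        finally:
            lock.release()
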
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check whether it was modified since the last time it was read
        and to reread it if it has changed.

        This differs from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you
        want to explicitly read the dirstate again (i.e. restoring it to
        a previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

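    # Illustrative sketch (not part of the original file): the lock
    # ordering used by commit() below -- take wlock (working directory)
    # before lock (store) and release in reverse order to avoid deadlocks.
    def _examplelockordering(repo):
        wlock = repo.wlock()
        try:
            lock = repo.lock()
            try:
                pass # ... touch both the working directory and the store ...
            finally:
                lock.release()
        finally:
            wlock.release()
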
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

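    # Illustrative sketch (not part of the original file): reading back the
    # rename metadata that _filecommit() stores ('copy' and 'copyrev' in
    # the filelog revision's meta dict), via the same renamed() API used
    # above.
    def _examplecopysource(repo, path, changeid):
        fctx = repo.filectx(path, changeid=changeid)
        renamed = fctx.renamed()   # false, or (source path, source filenode)
        if renamed:
            return (renamed[0], hex(renamed[1]))
        return None
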
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

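    # Illustrative sketch (not part of the original file): a minimal caller
    # of commit(), in the spirit of what commands.py does. The message and
    # username are made up; a None return means nothing changed.
    def _examplecommit(repo):
        return repo.commit(text='example change',
                           user='example <user@example.com>')
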
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

1433 def walk(self, match, node=None):
1433 def walk(self, match, node=None):
1434 '''
1434 '''
1435 walk recursively through the directory tree or a given
1435 walk recursively through the directory tree or a given
1436 changeset, finding all files matched by the match
1436 changeset, finding all files matched by the match
1437 function
1437 function
1438 '''
1438 '''
1439 return self[node].walk(match)
1439 return self[node].walk(match)
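# A minimal usage sketch (illustrative, not part of the module; assumes
# an already-opened repository object, and reuses matchmod.always, the
# same matcher factory used in status() below):
#
#   m = matchmod.always(repo.root, repo.getcwd())
#   for f in repo.walk(m, node='tip'):
#       repo.ui.write('%s\n' % f)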
1440
1440
1441 def status(self, node1='.', node2=None, match=None,
1441 def status(self, node1='.', node2=None, match=None,
1442 ignored=False, clean=False, unknown=False,
1442 ignored=False, clean=False, unknown=False,
1443 listsubrepos=False):
1443 listsubrepos=False):
1444 """return status of files between two nodes or node and working
1444 """return status of files between two nodes or node and working
1445 directory.
1445 directory.
1446
1446
1447 If node1 is None, use the first dirstate parent instead.
1447 If node1 is None, use the first dirstate parent instead.
1448 If node2 is None, compare node1 with working directory.
1448 If node2 is None, compare node1 with working directory.
1449 """
1449 """
1450
1450
1451 def mfmatches(ctx):
1451 def mfmatches(ctx):
1452 mf = ctx.manifest().copy()
1452 mf = ctx.manifest().copy()
1453 if match.always():
1453 if match.always():
1454 return mf
1454 return mf
1455 for fn in mf.keys():
1455 for fn in mf.keys():
1456 if not match(fn):
1456 if not match(fn):
1457 del mf[fn]
1457 del mf[fn]
1458 return mf
1458 return mf
1459
1459
1460 if isinstance(node1, context.changectx):
1460 if isinstance(node1, context.changectx):
1461 ctx1 = node1
1461 ctx1 = node1
1462 else:
1462 else:
1463 ctx1 = self[node1]
1463 ctx1 = self[node1]
1464 if isinstance(node2, context.changectx):
1464 if isinstance(node2, context.changectx):
1465 ctx2 = node2
1465 ctx2 = node2
1466 else:
1466 else:
1467 ctx2 = self[node2]
1467 ctx2 = self[node2]
1468
1468
1469 working = ctx2.rev() is None
1469 working = ctx2.rev() is None
1470 parentworking = working and ctx1 == self['.']
1470 parentworking = working and ctx1 == self['.']
1471 match = match or matchmod.always(self.root, self.getcwd())
1471 match = match or matchmod.always(self.root, self.getcwd())
1472 listignored, listclean, listunknown = ignored, clean, unknown
1472 listignored, listclean, listunknown = ignored, clean, unknown
1473
1473
1474 # load earliest manifest first for caching reasons
1474 # load earliest manifest first for caching reasons
1475 if not working and ctx2.rev() < ctx1.rev():
1475 if not working and ctx2.rev() < ctx1.rev():
1476 ctx2.manifest()
1476 ctx2.manifest()
1477
1477
1478 if not parentworking:
1478 if not parentworking:
1479 def bad(f, msg):
1479 def bad(f, msg):
1480 # 'f' may be a directory pattern from 'match.files()',
1480 # 'f' may be a directory pattern from 'match.files()',
1481 # so 'f not in ctx1' is not enough
1481 # so 'f not in ctx1' is not enough
1482 if f not in ctx1 and f not in ctx1.dirs():
1482 if f not in ctx1 and f not in ctx1.dirs():
1483 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1483 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1484 match.bad = bad
1484 match.bad = bad
1485
1485
1486 if working: # we need to scan the working dir
1486 if working: # we need to scan the working dir
1487 subrepos = []
1487 subrepos = []
1488 if '.hgsub' in self.dirstate:
1488 if '.hgsub' in self.dirstate:
1489 subrepos = sorted(ctx2.substate)
1489 subrepos = sorted(ctx2.substate)
1490 s = self.dirstate.status(match, subrepos, listignored,
1490 s = self.dirstate.status(match, subrepos, listignored,
1491 listclean, listunknown)
1491 listclean, listunknown)
1492 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1492 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1493
1493
1494 # check for any possibly clean files
1494 # check for any possibly clean files
1495 if parentworking and cmp:
1495 if parentworking and cmp:
1496 fixup = []
1496 fixup = []
1497 # do a full compare of any files that might have changed
1497 # do a full compare of any files that might have changed
1498 for f in sorted(cmp):
1498 for f in sorted(cmp):
1499 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1499 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1500 or ctx1[f].cmp(ctx2[f])):
1500 or ctx1[f].cmp(ctx2[f])):
1501 modified.append(f)
1501 modified.append(f)
1502 else:
1502 else:
1503 fixup.append(f)
1503 fixup.append(f)
1504
1504
1505 # update dirstate for files that are actually clean
1505 # update dirstate for files that are actually clean
1506 if fixup:
1506 if fixup:
1507 if listclean:
1507 if listclean:
1508 clean += fixup
1508 clean += fixup
1509
1509
1510 try:
1510 try:
1511 # updating the dirstate is optional
1511 # updating the dirstate is optional
1512 # so we don't wait on the lock
1512 # so we don't wait on the lock
1513 wlock = self.wlock(False)
1513 wlock = self.wlock(False)
1514 try:
1514 try:
1515 for f in fixup:
1515 for f in fixup:
1516 self.dirstate.normal(f)
1516 self.dirstate.normal(f)
1517 finally:
1517 finally:
1518 wlock.release()
1518 wlock.release()
1519 except error.LockError:
1519 except error.LockError:
1520 pass
1520 pass
1521
1521
1522 if not parentworking:
1522 if not parentworking:
1523 mf1 = mfmatches(ctx1)
1523 mf1 = mfmatches(ctx1)
1524 if working:
1524 if working:
1525 # we are comparing working dir against non-parent
1525 # we are comparing working dir against non-parent
1526 # generate a pseudo-manifest for the working dir
1526 # generate a pseudo-manifest for the working dir
1527 mf2 = mfmatches(self['.'])
1527 mf2 = mfmatches(self['.'])
1528 for f in cmp + modified + added:
1528 for f in cmp + modified + added:
1529 mf2[f] = None
1529 mf2[f] = None
1530 mf2.set(f, ctx2.flags(f))
1530 mf2.set(f, ctx2.flags(f))
1531 for f in removed:
1531 for f in removed:
1532 if f in mf2:
1532 if f in mf2:
1533 del mf2[f]
1533 del mf2[f]
1534 else:
1534 else:
1535 # we are comparing two revisions
1535 # we are comparing two revisions
1536 deleted, unknown, ignored = [], [], []
1536 deleted, unknown, ignored = [], [], []
1537 mf2 = mfmatches(ctx2)
1537 mf2 = mfmatches(ctx2)
1538
1538
1539 modified, added, clean = [], [], []
1539 modified, added, clean = [], [], []
1540 withflags = mf1.withflags() | mf2.withflags()
1540 withflags = mf1.withflags() | mf2.withflags()
1541 for fn, mf2node in mf2.iteritems():
1541 for fn, mf2node in mf2.iteritems():
1542 if fn in mf1:
1542 if fn in mf1:
1543 if (fn not in deleted and
1543 if (fn not in deleted and
1544 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1544 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1545 (mf1[fn] != mf2node and
1545 (mf1[fn] != mf2node and
1546 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1546 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1547 modified.append(fn)
1547 modified.append(fn)
1548 elif listclean:
1548 elif listclean:
1549 clean.append(fn)
1549 clean.append(fn)
1550 del mf1[fn]
1550 del mf1[fn]
1551 elif fn not in deleted:
1551 elif fn not in deleted:
1552 added.append(fn)
1552 added.append(fn)
1553 removed = mf1.keys()
1553 removed = mf1.keys()
1554
1554
1555 if working and modified and not self.dirstate._checklink:
1555 if working and modified and not self.dirstate._checklink:
1556 # Symlink placeholders may get non-symlink-like contents
1556 # Symlink placeholders may get non-symlink-like contents
1557 # via user error or dereferencing by NFS or Samba servers,
1557 # via user error or dereferencing by NFS or Samba servers,
1558 # so we filter out any placeholders that don't look like a
1558 # so we filter out any placeholders that don't look like a
1559 # symlink
1559 # symlink
1560 sane = []
1560 sane = []
1561 for f in modified:
1561 for f in modified:
1562 if ctx2.flags(f) == 'l':
1562 if ctx2.flags(f) == 'l':
1563 d = ctx2[f].data()
1563 d = ctx2[f].data()
1564 if len(d) >= 1024 or '\n' in d or util.binary(d):
1564 if len(d) >= 1024 or '\n' in d or util.binary(d):
1565 self.ui.debug('ignoring suspect symlink placeholder'
1565 self.ui.debug('ignoring suspect symlink placeholder'
1566 ' "%s"\n' % f)
1566 ' "%s"\n' % f)
1567 continue
1567 continue
1568 sane.append(f)
1568 sane.append(f)
1569 modified = sane
1569 modified = sane
1570
1570
1571 r = modified, added, removed, deleted, unknown, ignored, clean
1571 r = modified, added, removed, deleted, unknown, ignored, clean
1572
1572
1573 if listsubrepos:
1573 if listsubrepos:
1574 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1574 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1575 if working:
1575 if working:
1576 rev2 = None
1576 rev2 = None
1577 else:
1577 else:
1578 rev2 = ctx2.substate[subpath][1]
1578 rev2 = ctx2.substate[subpath][1]
1579 try:
1579 try:
1580 submatch = matchmod.narrowmatcher(subpath, match)
1580 submatch = matchmod.narrowmatcher(subpath, match)
1581 s = sub.status(rev2, match=submatch, ignored=listignored,
1581 s = sub.status(rev2, match=submatch, ignored=listignored,
1582 clean=listclean, unknown=listunknown,
1582 clean=listclean, unknown=listunknown,
1583 listsubrepos=True)
1583 listsubrepos=True)
1584 for rfiles, sfiles in zip(r, s):
1584 for rfiles, sfiles in zip(r, s):
1585 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1585 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1586 except error.LookupError:
1586 except error.LookupError:
1587 self.ui.status(_("skipping missing subrepository: %s\n")
1587 self.ui.status(_("skipping missing subrepository: %s\n")
1588 % subpath)
1588 % subpath)
1589
1589
1590 for l in r:
1590 for l in r:
1591 l.sort()
1591 l.sort()
1592 return r
1592 return r
1593
1593
1594 def heads(self, start=None):
1594 def heads(self, start=None):
1595 heads = self.changelog.heads(start)
1595 heads = self.changelog.heads(start)
1596 # sort the output in rev descending order
1596 # sort the output in rev descending order
1597 return sorted(heads, key=self.changelog.rev, reverse=True)
1597 return sorted(heads, key=self.changelog.rev, reverse=True)
1598
1598
1599 def branchheads(self, branch=None, start=None, closed=False):
1599 def branchheads(self, branch=None, start=None, closed=False):
1600 '''return a (possibly filtered) list of heads for the given branch
1600 '''return a (possibly filtered) list of heads for the given branch
1601
1601
1602 Heads are returned in topological order, from newest to oldest.
1602 Heads are returned in topological order, from newest to oldest.
1603 If branch is None, use the dirstate branch.
1603 If branch is None, use the dirstate branch.
1604 If start is not None, return only heads reachable from start.
1604 If start is not None, return only heads reachable from start.
1605 If closed is True, return heads that are marked as closed as well.
1605 If closed is True, return heads that are marked as closed as well.
1606 '''
1606 '''
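# For example (illustrative; assumes an open repository object):
#
#   bheads = repo.branchheads('default', closed=False)
#   # bheads[0] is the newest head of the 'default' branch, per the
#   # topological ordering promised above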
1607 if branch is None:
1607 if branch is None:
1608 branch = self[None].branch()
1608 branch = self[None].branch()
1609 branches = self.branchmap()
1609 branches = self.branchmap()
1610 if branch not in branches:
1610 if branch not in branches:
1611 return []
1611 return []
1612 # the cache returns heads ordered lowest to highest
1612 # the cache returns heads ordered lowest to highest
1613 bheads = list(reversed(branches[branch]))
1613 bheads = list(reversed(branches[branch]))
1614 if start is not None:
1614 if start is not None:
1615 # filter out the heads that cannot be reached from startrev
1615 # filter out the heads that cannot be reached from startrev
1616 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1616 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1617 bheads = [h for h in bheads if h in fbheads]
1617 bheads = [h for h in bheads if h in fbheads]
1618 if not closed:
1618 if not closed:
1619 bheads = [h for h in bheads if not self[h].closesbranch()]
1619 bheads = [h for h in bheads if not self[h].closesbranch()]
1620 return bheads
1620 return bheads
1621
1621
1622 def branches(self, nodes):
1622 def branches(self, nodes):
1623 if not nodes:
1623 if not nodes:
1624 nodes = [self.changelog.tip()]
1624 nodes = [self.changelog.tip()]
1625 b = []
1625 b = []
1626 for n in nodes:
1626 for n in nodes:
1627 t = n
1627 t = n
1628 while True:
1628 while True:
1629 p = self.changelog.parents(n)
1629 p = self.changelog.parents(n)
1630 if p[1] != nullid or p[0] == nullid:
1630 if p[1] != nullid or p[0] == nullid:
1631 b.append((t, n, p[0], p[1]))
1631 b.append((t, n, p[0], p[1]))
1632 break
1632 break
1633 n = p[0]
1633 n = p[0]
1634 return b
1634 return b
1635
1635
1636 def between(self, pairs):
1636 def between(self, pairs):
1637 r = []
1637 r = []
1638
1638
1639 for top, bottom in pairs:
1639 for top, bottom in pairs:
1640 n, l, i = top, [], 0
1640 n, l, i = top, [], 0
1641 f = 1
1641 f = 1
1642
1642
1643 while n != bottom and n != nullid:
1643 while n != bottom and n != nullid:
1644 p = self.changelog.parents(n)[0]
1644 p = self.changelog.parents(n)[0]
1645 if i == f:
1645 if i == f:
1646 l.append(n)
1646 l.append(n)
1647 f = f * 2
1647 f = f * 2
1648 n = p
1648 n = p
1649 i += 1
1649 i += 1
1650
1650
1651 r.append(l)
1651 r.append(l)
1652
1652
1653 return r
1653 return r
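# The loop above samples the first-parent chain at exponentially
# growing distances (1, 2, 4, 8, ...) from top, which the old
# discovery wire protocol uses to narrow a range cheaply. The same
# idea over a plain list of revisions (a standalone sketch; names are
# illustrative):
#
#   def exponential_sample(chain):
#       out, f = [], 1
#       for i, rev in enumerate(chain):
#           if i == f:
#               out.append(rev)
#               f *= 2
#       return out
#
#   exponential_sample(range(10, 30))  # -> [11, 12, 14, 18, 26]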
1654
1654
1655 def pull(self, remote, heads=None, force=False):
1655 def pull(self, remote, heads=None, force=False):
1656 # don't open a transaction for nothing or you break future useful
1656 # don't open a transaction for nothing or you break future useful
1657 # rollback calls
1657 # rollback calls
1658 tr = None
1658 tr = None
1659 trname = 'pull\n' + util.hidepassword(remote.url())
1659 trname = 'pull\n' + util.hidepassword(remote.url())
1660 lock = self.lock()
1660 lock = self.lock()
1661 try:
1661 try:
1662 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1662 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1663 force=force)
1663 force=force)
1664 common, fetch, rheads = tmp
1664 common, fetch, rheads = tmp
1665 if not fetch:
1665 if not fetch:
1666 self.ui.status(_("no changes found\n"))
1666 self.ui.status(_("no changes found\n"))
1667 added = []
1667 added = []
1668 result = 0
1668 result = 0
1669 else:
1669 else:
1670 tr = self.transaction(trname)
1670 tr = self.transaction(trname)
1671 if heads is None and list(common) == [nullid]:
1671 if heads is None and list(common) == [nullid]:
1672 self.ui.status(_("requesting all changes\n"))
1672 self.ui.status(_("requesting all changes\n"))
1673 elif heads is None and remote.capable('changegroupsubset'):
1673 elif heads is None and remote.capable('changegroupsubset'):
1674 # issue1320, avoid a race if remote changed after discovery
1674 # issue1320, avoid a race if remote changed after discovery
1675 heads = rheads
1675 heads = rheads
1676
1676
1677 if remote.capable('getbundle'):
1677 if remote.capable('getbundle'):
1678 # TODO: get bundlecaps from remote
1678 # TODO: get bundlecaps from remote
1679 cg = remote.getbundle('pull', common=common,
1679 cg = remote.getbundle('pull', common=common,
1680 heads=heads or rheads)
1680 heads=heads or rheads)
1681 elif heads is None:
1681 elif heads is None:
1682 cg = remote.changegroup(fetch, 'pull')
1682 cg = remote.changegroup(fetch, 'pull')
1683 elif not remote.capable('changegroupsubset'):
1683 elif not remote.capable('changegroupsubset'):
1684 raise util.Abort(_("partial pull cannot be done because "
1684 raise util.Abort(_("partial pull cannot be done because "
1685 "other repository doesn't support "
1685 "other repository doesn't support "
1686 "changegroupsubset."))
1686 "changegroupsubset."))
1687 else:
1687 else:
1688 cg = remote.changegroupsubset(fetch, heads, 'pull')
1688 cg = remote.changegroupsubset(fetch, heads, 'pull')
1689 # we use the unfiltered changelog here because hidden revisions must
1689 # we use the unfiltered changelog here because hidden revisions must
1690 # be taken into account for phase synchronization. They may
1690 # be taken into account for phase synchronization. They may
1691 # become public and thus visible again.
1691 # become public and thus visible again.
1692 cl = self.unfiltered().changelog
1692 cl = self.unfiltered().changelog
1693 clstart = len(cl)
1693 clstart = len(cl)
1694 result = self.addchangegroup(cg, 'pull', remote.url())
1694 result = self.addchangegroup(cg, 'pull', remote.url())
1695 clend = len(cl)
1695 clend = len(cl)
1696 added = [cl.node(r) for r in xrange(clstart, clend)]
1696 added = [cl.node(r) for r in xrange(clstart, clend)]
1697
1697
1698 # compute target subset
1698 # compute target subset
1699 if heads is None:
1699 if heads is None:
1700 # We pulled everything possible
1700 # We pulled everything possible
1701 # sync on everything common
1701 # sync on everything common
1702 subset = common + added
1702 subset = common + added
1703 else:
1703 else:
1704 # We pulled a specific subset
1704 # We pulled a specific subset
1705 # sync on this subset
1705 # sync on this subset
1706 subset = heads
1706 subset = heads
1707
1707
1708 # Get remote phases data from remote
1708 # Get remote phases data from remote
1709 remotephases = remote.listkeys('phases')
1709 remotephases = remote.listkeys('phases')
1710 publishing = bool(remotephases.get('publishing', False))
1710 publishing = bool(remotephases.get('publishing', False))
1711 if remotephases and not publishing:
1711 if remotephases and not publishing:
1712 # remote is new and non-publishing
1712 # remote is new and non-publishing
1713 pheads, _dr = phases.analyzeremotephases(self, subset,
1713 pheads, _dr = phases.analyzeremotephases(self, subset,
1714 remotephases)
1714 remotephases)
1715 phases.advanceboundary(self, phases.public, pheads)
1715 phases.advanceboundary(self, phases.public, pheads)
1716 phases.advanceboundary(self, phases.draft, subset)
1716 phases.advanceboundary(self, phases.draft, subset)
1717 else:
1717 else:
1718 # Remote is old or publishing; all common changesets
1718 # Remote is old or publishing; all common changesets
1719 # should be seen as public
1719 # should be seen as public
1720 phases.advanceboundary(self, phases.public, subset)
1720 phases.advanceboundary(self, phases.public, subset)
1721
1721
1722 def gettransaction():
1722 def gettransaction():
1723 if tr is None:
1723 if tr is None:
1724 return self.transaction(trname)
1724 return self.transaction(trname)
1725 return tr
1725 return tr
1726
1726
1727 obstr = obsolete.syncpull(self, remote, gettransaction)
1727 obstr = obsolete.syncpull(self, remote, gettransaction)
1728 if obstr is not None:
1728 if obstr is not None:
1729 tr = obstr
1729 tr = obstr
1730
1730
1731 if tr is not None:
1731 if tr is not None:
1732 tr.close()
1732 tr.close()
1733 finally:
1733 finally:
1734 if tr is not None:
1734 if tr is not None:
1735 tr.release()
1735 tr.release()
1736 lock.release()
1736 lock.release()
1737
1737
1738 return result
1738 return result
1739
1739
1740 def checkpush(self, force, revs):
1740 def checkpush(self, force, revs):
1741 """Extensions can override this function if additional checks have
1741 """Extensions can override this function if additional checks have
1742 to be performed before pushing, or call it if they override push
1742 to be performed before pushing, or call it if they override push
1743 command.
1743 command.
1744 """
1744 """
1745 pass
1745 pass
1746
1746
1747 def push(self, remote, force=False, revs=None, newbranch=False):
1747 def push(self, remote, force=False, revs=None, newbranch=False):
1748 '''Push outgoing changesets (limited by revs) from the current
1748 '''Push outgoing changesets (limited by revs) from the current
1749 repository to remote. Return an integer:
1749 repository to remote. Return an integer:
1750 - None means nothing to push
1750 - None means nothing to push
1751 - 0 means HTTP error
1751 - 0 means HTTP error
1752 - 1 means we pushed and remote head count is unchanged *or*
1752 - 1 means we pushed and remote head count is unchanged *or*
1753 we have outgoing changesets but refused to push
1753 we have outgoing changesets but refused to push
1754 - other values as described by addchangegroup()
1754 - other values as described by addchangegroup()
1755 '''
1755 '''
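# A sketch of interpreting that return value on the caller's side
# (illustrative; `remote` stands for any peer object):
#
#   ret = repo.push(remote)
#   if ret is None:
#       pass  # nothing to push
#   elif ret == 0:
#       pass  # HTTP error while unbundling
#   else:
#       pass  # pushed; head-count codes as in addchangegroup()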
1756 # there are two ways to push to remote repo:
1756 # there are two ways to push to remote repo:
1757 #
1757 #
1758 # addchangegroup assumes local user can lock remote
1758 # addchangegroup assumes local user can lock remote
1759 # repo (local filesystem, old ssh servers).
1759 # repo (local filesystem, old ssh servers).
1760 #
1760 #
1761 # unbundle assumes local user cannot lock remote repo (new ssh
1761 # unbundle assumes local user cannot lock remote repo (new ssh
1762 # servers, http servers).
1762 # servers, http servers).
1763
1763
1764 if not remote.canpush():
1764 if not remote.canpush():
1765 raise util.Abort(_("destination does not support push"))
1765 raise util.Abort(_("destination does not support push"))
1766 unfi = self.unfiltered()
1766 unfi = self.unfiltered()
1767 def localphasemove(nodes, phase=phases.public):
1767 def localphasemove(nodes, phase=phases.public):
1768 """move <nodes> to <phase> in the local source repo"""
1768 """move <nodes> to <phase> in the local source repo"""
1769 if locallock is not None:
1769 if locallock is not None:
1770 phases.advanceboundary(self, phase, nodes)
1770 phases.advanceboundary(self, phase, nodes)
1771 else:
1771 else:
1772 # repo is not locked, do not change any phases!
1772 # repo is not locked, do not change any phases!
1773 # Inform the user that phases should have been moved when
1773 # Inform the user that phases should have been moved when
1774 # applicable.
1774 # applicable.
1775 actualmoves = [n for n in nodes if phase < self[n].phase()]
1775 actualmoves = [n for n in nodes if phase < self[n].phase()]
1776 phasestr = phases.phasenames[phase]
1776 phasestr = phases.phasenames[phase]
1777 if actualmoves:
1777 if actualmoves:
1778 self.ui.status(_('cannot lock source repo, skipping local'
1778 self.ui.status(_('cannot lock source repo, skipping local'
1779 ' %s phase update\n') % phasestr)
1779 ' %s phase update\n') % phasestr)
1780 # get local lock as we might write phase data
1780 # get local lock as we might write phase data
1781 locallock = None
1781 locallock = None
1782 try:
1782 try:
1783 locallock = self.lock()
1783 locallock = self.lock()
1784 except IOError, err:
1784 except IOError, err:
1785 if err.errno != errno.EACCES:
1785 if err.errno != errno.EACCES:
1786 raise
1786 raise
1787 # source repo cannot be locked.
1787 # source repo cannot be locked.
1788 # We do not abort the push, but just disable the local phase
1788 # We do not abort the push, but just disable the local phase
1789 # synchronisation.
1789 # synchronisation.
1790 msg = 'cannot lock source repository: %s\n' % err
1790 msg = 'cannot lock source repository: %s\n' % err
1791 self.ui.debug(msg)
1791 self.ui.debug(msg)
1792 try:
1792 try:
1793 self.checkpush(force, revs)
1793 self.checkpush(force, revs)
1794 lock = None
1794 lock = None
1795 unbundle = remote.capable('unbundle')
1795 unbundle = remote.capable('unbundle')
1796 if not unbundle:
1796 if not unbundle:
1797 lock = remote.lock()
1797 lock = remote.lock()
1798 try:
1798 try:
1799 # discovery
1799 # discovery
1800 fci = discovery.findcommonincoming
1800 fci = discovery.findcommonincoming
1801 commoninc = fci(unfi, remote, force=force)
1801 commoninc = fci(unfi, remote, force=force)
1802 common, inc, remoteheads = commoninc
1802 common, inc, remoteheads = commoninc
1803 fco = discovery.findcommonoutgoing
1803 fco = discovery.findcommonoutgoing
1804 outgoing = fco(unfi, remote, onlyheads=revs,
1804 outgoing = fco(unfi, remote, onlyheads=revs,
1805 commoninc=commoninc, force=force)
1805 commoninc=commoninc, force=force)
1806
1806
1807
1807
1808 if not outgoing.missing:
1808 if not outgoing.missing:
1809 # nothing to push
1809 # nothing to push
1810 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1810 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1811 ret = None
1811 ret = None
1812 else:
1812 else:
1813 # something to push
1813 # something to push
1814 if not force:
1814 if not force:
1815 # if self.obsstore is empty --> no obsolete changesets,
1815 # if self.obsstore is empty --> no obsolete changesets,
1816 # so we can skip the iteration
1816 # so we can skip the iteration
1817 if unfi.obsstore:
1817 if unfi.obsstore:
1818 # these messages are here for 80-char-limit reasons
1818 # these messages are here for 80-char-limit reasons
1819 mso = _("push includes obsolete changeset: %s!")
1819 mso = _("push includes obsolete changeset: %s!")
1820 mst = "push includes %s changeset: %s!"
1820 mst = "push includes %s changeset: %s!"
1821 # plain versions for i18n tool to detect them
1821 # plain versions for i18n tool to detect them
1822 _("push includes unstable changeset: %s!")
1822 _("push includes unstable changeset: %s!")
1823 _("push includes bumped changeset: %s!")
1823 _("push includes bumped changeset: %s!")
1824 _("push includes divergent changeset: %s!")
1824 _("push includes divergent changeset: %s!")
1825 # If we are about to push and there is at least one
1825 # If we are about to push and there is at least one
1826 # obsolete or unstable changeset in missing, then at
1826 # obsolete or unstable changeset in missing, then at
1827 # least one of the missing heads will be obsolete or
1827 # least one of the missing heads will be obsolete or
1828 # unstable. So checking only the heads is enough.
1828 # unstable. So checking only the heads is enough.
1829 for node in outgoing.missingheads:
1829 for node in outgoing.missingheads:
1830 ctx = unfi[node]
1830 ctx = unfi[node]
1831 if ctx.obsolete():
1831 if ctx.obsolete():
1832 raise util.Abort(mso % ctx)
1832 raise util.Abort(mso % ctx)
1833 elif ctx.troubled():
1833 elif ctx.troubled():
1834 raise util.Abort(_(mst)
1834 raise util.Abort(_(mst)
1835 % (ctx.troubles()[0],
1835 % (ctx.troubles()[0],
1836 ctx))
1836 ctx))
1837 discovery.checkheads(unfi, remote, outgoing,
1837 discovery.checkheads(unfi, remote, outgoing,
1838 remoteheads, newbranch,
1838 remoteheads, newbranch,
1839 bool(inc))
1839 bool(inc))
1840
1840
1841 # TODO: get bundlecaps from remote
1841 # TODO: get bundlecaps from remote
1842 bundlecaps = None
1842 bundlecaps = None
1843 # create a changegroup from local
1843 # create a changegroup from local
1844 if revs is None and not outgoing.excluded:
1844 if revs is None and not outgoing.excluded:
1845 # push everything,
1845 # push everything,
1846 # use the fast path, no race possible on push
1846 # use the fast path, no race possible on push
1847 bundler = changegroup.bundle10(self, bundlecaps)
1847 bundler = changegroup.bundle10(self, bundlecaps)
1848 cg = self._changegroupsubset(outgoing,
1848 cg = self._changegroupsubset(outgoing,
1849 bundler,
1849 bundler,
1850 'push',
1850 'push',
1851 fastpath=True)
1851 fastpath=True)
1852 else:
1852 else:
1853 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1853 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1854
1854
1855 # apply changegroup to remote
1855 # apply changegroup to remote
1856 if unbundle:
1856 if unbundle:
1857 # local repo finds heads on server, finds out what
1857 # local repo finds heads on server, finds out what
1858 # revs it must push. once revs transferred, if server
1858 # revs it must push. once revs transferred, if server
1859 # finds it has different heads (someone else won
1859 # finds it has different heads (someone else won
1860 # commit/push race), server aborts.
1860 # commit/push race), server aborts.
1861 if force:
1861 if force:
1862 remoteheads = ['force']
1862 remoteheads = ['force']
1863 # ssh: return remote's addchangegroup()
1863 # ssh: return remote's addchangegroup()
1864 # http: return remote's addchangegroup() or 0 for error
1864 # http: return remote's addchangegroup() or 0 for error
1865 ret = remote.unbundle(cg, remoteheads, 'push')
1865 ret = remote.unbundle(cg, remoteheads, 'push')
1866 else:
1866 else:
1867 # we return an integer indicating remote head count
1867 # we return an integer indicating remote head count
1868 # change
1868 # change
1869 ret = remote.addchangegroup(cg, 'push', self.url())
1869 ret = remote.addchangegroup(cg, 'push', self.url())
1870
1870
1871 if ret:
1871 if ret:
1872 # push succeeded, synchronize the target of the push
1872 # push succeeded, synchronize the target of the push
1873 cheads = outgoing.missingheads
1873 cheads = outgoing.missingheads
1874 elif revs is None:
1874 elif revs is None:
1875 # All-out push failed; synchronize all common
1875 # All-out push failed; synchronize all common
1876 cheads = outgoing.commonheads
1876 cheads = outgoing.commonheads
1877 else:
1877 else:
1878 # I want cheads = heads(::missingheads and ::commonheads)
1878 # I want cheads = heads(::missingheads and ::commonheads)
1879 # (missingheads is revs with secret changeset filtered out)
1879 # (missingheads is revs with secret changeset filtered out)
1880 #
1880 #
1881 # This can be expressed as:
1881 # This can be expressed as:
1882 # cheads = ( (missingheads and ::commonheads)
1882 # cheads = ( (missingheads and ::commonheads)
1883 #          + (commonheads and ::missingheads) )
1883 #          + (commonheads and ::missingheads) )
1885 #
1885 #
1886 # while trying to push we already computed the following:
1886 # while trying to push we already computed the following:
1887 # common = (::commonheads)
1887 # common = (::commonheads)
1888 # missing = ((commonheads::missingheads) - commonheads)
1888 # missing = ((commonheads::missingheads) - commonheads)
1889 #
1889 #
1890 # We can pick:
1890 # We can pick:
1891 # * missingheads part of common (::commonheads)
1891 # * missingheads part of common (::commonheads)
1892 common = set(outgoing.common)
1892 common = set(outgoing.common)
1893 cheads = [node for node in revs if node in common]
1893 cheads = [node for node in revs if node in common]
1894 # and
1894 # and
1895 # * commonheads parents on missing
1895 # * commonheads parents on missing
1896 revset = unfi.set('%ln and parents(roots(%ln))',
1896 revset = unfi.set('%ln and parents(roots(%ln))',
1897 outgoing.commonheads,
1897 outgoing.commonheads,
1898 outgoing.missing)
1898 outgoing.missing)
1899 cheads.extend(c.node() for c in revset)
1899 cheads.extend(c.node() for c in revset)
1900 # even when we don't push, exchanging phase data is useful
1900 # even when we don't push, exchanging phase data is useful
1901 remotephases = remote.listkeys('phases')
1901 remotephases = remote.listkeys('phases')
1902 if (self.ui.configbool('ui', '_usedassubrepo', False)
1902 if (self.ui.configbool('ui', '_usedassubrepo', False)
1903 and remotephases # server supports phases
1903 and remotephases # server supports phases
1904 and ret is None # nothing was pushed
1904 and ret is None # nothing was pushed
1905 and remotephases.get('publishing', False)):
1905 and remotephases.get('publishing', False)):
1906 # When:
1906 # When:
1907 # - this is a subrepo push
1907 # - this is a subrepo push
1908 # - and the remote supports phases
1908 # - and the remote supports phases
1909 # - and no changeset was pushed
1909 # - and no changeset was pushed
1910 # - and remote is publishing
1910 # - and remote is publishing
1911 # We may be in the issue 3871 case!
1911 # We may be in the issue 3871 case!
1912 # We drop the possible phase synchronisation done by
1912 # We drop the possible phase synchronisation done by
1913 # courtesy to publish changesets that may be locally draft
1913 # courtesy to publish changesets that may be locally draft
1914 # on the remote.
1914 # on the remote.
1915 remotephases = {'publishing': 'True'}
1915 remotephases = {'publishing': 'True'}
1916 if not remotephases: # old server or public only repo
1916 if not remotephases: # old server or public only repo
1917 localphasemove(cheads)
1917 localphasemove(cheads)
1918 # don't push any phase data as there is nothing to push
1918 # don't push any phase data as there is nothing to push
1919 else:
1919 else:
1920 ana = phases.analyzeremotephases(self, cheads, remotephases)
1920 ana = phases.analyzeremotephases(self, cheads, remotephases)
1921 pheads, droots = ana
1921 pheads, droots = ana
1922 ### Apply remote phase on local
1922 ### Apply remote phase on local
1923 if remotephases.get('publishing', False):
1923 if remotephases.get('publishing', False):
1924 localphasemove(cheads)
1924 localphasemove(cheads)
1925 else: # publish = False
1925 else: # publish = False
1926 localphasemove(pheads)
1926 localphasemove(pheads)
1927 localphasemove(cheads, phases.draft)
1927 localphasemove(cheads, phases.draft)
1928 ### Apply local phase on remote
1928 ### Apply local phase on remote
1929
1929
1930 # Get the list of all revs that are draft on the remote but public here.
1930 # Get the list of all revs that are draft on the remote but public here.
1931 # XXX Beware that the revset breaks if droots is not strictly
1931 # XXX Beware that the revset breaks if droots is not strictly
1932 # XXX roots; we may want to ensure it is, but that is costly
1932 # XXX roots; we may want to ensure it is, but that is costly
1933 outdated = unfi.set('heads((%ln::%ln) and public())',
1933 outdated = unfi.set('heads((%ln::%ln) and public())',
1934 droots, cheads)
1934 droots, cheads)
1935 for newremotehead in outdated:
1935 for newremotehead in outdated:
1936 r = remote.pushkey('phases',
1936 r = remote.pushkey('phases',
1937 newremotehead.hex(),
1937 newremotehead.hex(),
1938 str(phases.draft),
1938 str(phases.draft),
1939 str(phases.public))
1939 str(phases.public))
1940 if not r:
1940 if not r:
1941 self.ui.warn(_('updating %s to public failed!\n')
1941 self.ui.warn(_('updating %s to public failed!\n')
1942 % newremotehead)
1942 % newremotehead)
1943 self.ui.debug('try to push obsolete markers to remote\n')
1943 self.ui.debug('try to push obsolete markers to remote\n')
1944 obsolete.syncpush(self, remote)
1944 obsolete.syncpush(self, remote)
1945 finally:
1945 finally:
1946 if lock is not None:
1946 if lock is not None:
1947 lock.release()
1947 lock.release()
1948 finally:
1948 finally:
1949 if locallock is not None:
1949 if locallock is not None:
1950 locallock.release()
1950 locallock.release()
1951
1951
1952 self.ui.debug("checking for updated bookmarks\n")
1952 self.ui.debug("checking for updated bookmarks\n")
1953 rb = remote.listkeys('bookmarks')
1953 rb = remote.listkeys('bookmarks')
1954 for k in rb.keys():
1954 for k in rb.keys():
1955 if k in unfi._bookmarks:
1955 if k in unfi._bookmarks:
1956 nr, nl = rb[k], hex(self._bookmarks[k])
1956 nr, nl = rb[k], hex(self._bookmarks[k])
1957 if nr in unfi:
1957 if nr in unfi:
1958 cr = unfi[nr]
1958 cr = unfi[nr]
1959 cl = unfi[nl]
1959 cl = unfi[nl]
1960 if bookmarks.validdest(unfi, cr, cl):
1960 if bookmarks.validdest(unfi, cr, cl):
1961 r = remote.pushkey('bookmarks', k, nr, nl)
1961 r = remote.pushkey('bookmarks', k, nr, nl)
1962 if r:
1962 if r:
1963 self.ui.status(_("updating bookmark %s\n") % k)
1963 self.ui.status(_("updating bookmark %s\n") % k)
1964 else:
1964 else:
1965 self.ui.warn(_('updating bookmark %s'
1965 self.ui.warn(_('updating bookmark %s'
1966 ' failed!\n') % k)
1966 ' failed!\n') % k)
1967
1967
1968 return ret
1968 return ret
1969
1969
1970 def changegroupinfo(self, nodes, source):
1970 def changegroupinfo(self, nodes, source):
1971 if self.ui.verbose or source == 'bundle':
1971 if self.ui.verbose or source == 'bundle':
1972 self.ui.status(_("%d changesets found\n") % len(nodes))
1972 self.ui.status(_("%d changesets found\n") % len(nodes))
1973 if self.ui.debugflag:
1973 if self.ui.debugflag:
1974 self.ui.debug("list of changesets:\n")
1974 self.ui.debug("list of changesets:\n")
1975 for node in nodes:
1975 for node in nodes:
1976 self.ui.debug("%s\n" % hex(node))
1976 self.ui.debug("%s\n" % hex(node))
1977
1977
1978 def changegroupsubset(self, bases, heads, source):
1978 def changegroupsubset(self, bases, heads, source):
1979 """Compute a changegroup consisting of all the nodes that are
1979 """Compute a changegroup consisting of all the nodes that are
1980 descendants of any of the bases and ancestors of any of the heads.
1980 descendants of any of the bases and ancestors of any of the heads.
1981 Return a chunkbuffer object whose read() method will return
1981 Return a chunkbuffer object whose read() method will return
1982 successive changegroup chunks.
1982 successive changegroup chunks.
1983
1983
1984 It is fairly complex: determining which filenodes and which
1984 It is fairly complex: determining which filenodes and which
1985 manifest nodes need to be included for the changeset to be complete
1985 manifest nodes need to be included for the changeset to be complete
1986 is non-trivial.
1986 is non-trivial.
1987
1987
1988 Another wrinkle is doing the reverse, figuring out which changeset in
1988 Another wrinkle is doing the reverse, figuring out which changeset in
1989 the changegroup a particular filenode or manifestnode belongs to.
1989 the changegroup a particular filenode or manifestnode belongs to.
1990 """
1990 """
1991 cl = self.changelog
1991 cl = self.changelog
1992 if not bases:
1992 if not bases:
1993 bases = [nullid]
1993 bases = [nullid]
1994 # TODO: remove call to nodesbetween.
1994 # TODO: remove call to nodesbetween.
1995 csets, bases, heads = cl.nodesbetween(bases, heads)
1995 csets, bases, heads = cl.nodesbetween(bases, heads)
1996 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
1996 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
1997 outgoing = discovery.outgoing(cl, bases, heads)
1997 outgoing = discovery.outgoing(cl, bases, heads)
1998 bundler = changegroup.bundle10(self)
1998 bundler = changegroup.bundle10(self)
1999 return self._changegroupsubset(outgoing, bundler, source)
1999 return self._changegroupsubset(outgoing, bundler, source)
2000
2000
2001 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2001 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2002 """Like getbundle, but taking a discovery.outgoing as an argument.
2002 """Like getbundle, but taking a discovery.outgoing as an argument.
2003
2003
2004 This is only implemented for local repos and reuses potentially
2004 This is only implemented for local repos and reuses potentially
2005 precomputed sets in outgoing."""
2005 precomputed sets in outgoing."""
2006 if not outgoing.missing:
2006 if not outgoing.missing:
2007 return None
2007 return None
2008 bundler = changegroup.bundle10(self, bundlecaps)
2008 bundler = changegroup.bundle10(self, bundlecaps)
2009 return self._changegroupsubset(outgoing, bundler, source)
2009 return self._changegroupsubset(outgoing, bundler, source)
2010
2010
2011 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2011 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2012 """Like changegroupsubset, but returns the set difference between the
2012 """Like changegroupsubset, but returns the set difference between the
2013 ancestors of heads and the ancestors common.
2013 ancestors of heads and the ancestors common.
2014
2014
2015 If heads is None, use the local heads. If common is None, use [nullid].
2015 If heads is None, use the local heads. If common is None, use [nullid].
2016
2016
2017 The nodes in common might not all be known locally due to the way the
2017 The nodes in common might not all be known locally due to the way the
2018 current discovery protocol works.
2018 current discovery protocol works.
2019 """
2019 """
2020 cl = self.changelog
2020 cl = self.changelog
2021 if common:
2021 if common:
2022 hasnode = cl.hasnode
2022 hasnode = cl.hasnode
2023 common = [n for n in common if hasnode(n)]
2023 common = [n for n in common if hasnode(n)]
2024 else:
2024 else:
2025 common = [nullid]
2025 common = [nullid]
2026 if not heads:
2026 if not heads:
2027 heads = cl.heads()
2027 heads = cl.heads()
2028 return self.getlocalbundle(source,
2028 return self.getlocalbundle(source,
2029 discovery.outgoing(cl, common, heads),
2029 discovery.outgoing(cl, common, heads),
2030 bundlecaps=bundlecaps)
2030 bundlecaps=bundlecaps)
2031
2031
2032 @unfilteredmethod
2032 @unfilteredmethod
2033 def _changegroupsubset(self, outgoing, bundler, source,
2033 def _changegroupsubset(self, outgoing, bundler, source,
2034 fastpath=False):
2034 fastpath=False):
2035 commonrevs = outgoing.common
2035 commonrevs = outgoing.common
2036 csets = outgoing.missing
2036 csets = outgoing.missing
2037 heads = outgoing.missingheads
2037 heads = outgoing.missingheads
2038 cl = bundler._changelog
2039 mf = bundler._manifest
2040 mfs = {} # needed manifests
2041 fnodes = {} # needed file nodes
2042 changedfiles = set()
2043 fstate = ['', {}]
2044
2045 # We go through the fast path if we are told to, or if all (unfiltered)
2038 # We go through the fast path if we are told to, or if all (unfiltered)
2046 # heads have been requested (since we then know that all linkrevs will
2039 # heads have been requested (since we then know that all linkrevs will
2047 # be pulled by the client).
2040 # be pulled by the client).
2048 heads.sort()
2041 heads.sort()
2049 fastpathlinkrev = fastpath or (
2042 fastpathlinkrev = fastpath or (
2050 self.filtername is None and heads == sorted(self.heads()))
2043 self.filtername is None and heads == sorted(self.heads()))
2051
2044
2052 self.hook('preoutgoing', throw=True, source=source)
2045 self.hook('preoutgoing', throw=True, source=source)
2053 self.changegroupinfo(csets, source)
2046 self.changegroupinfo(csets, source)
2054
2047 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2055 # filter any nodes that claim to be part of the known set
2056 def prune(revlog, missing):
2057 rr, rl = revlog.rev, revlog.linkrev
2058 return [n for n in missing
2059 if rl(rr(n)) not in commonrevs]
2060
2061 progress = self.ui.progress
2062 _bundling = _('bundling')
2063 _changesets = _('changesets')
2064 _manifests = _('manifests')
2065 _files = _('files')
2066
2067 def lookup(revlog, x):
2068 count = bundler.count
2069 if revlog == cl:
2070 c = cl.read(x)
2071 changedfiles.update(c[3])
2072 mfs.setdefault(c[0], x)
2073 count[0] += 1
2074 progress(_bundling, count[0],
2075 unit=_changesets, total=count[1])
2076 return x
2077 elif revlog == mf:
2078 clnode = mfs[x]
2079 if not fastpathlinkrev:
2080 mdata = mf.readfast(x)
2081 for f, n in mdata.iteritems():
2082 if f in changedfiles:
2083 fnodes[f].setdefault(n, clnode)
2084 count[0] += 1
2085 progress(_bundling, count[0],
2086 unit=_manifests, total=count[1])
2087 return clnode
2088 else:
2089 progress(_bundling, count[0], item=fstate[0],
2090 unit=_files, total=count[1])
2091 return fstate[1][x]
2092
2093 bundler.start(lookup)
2094
2095 def getmfnodes():
2096 for f in changedfiles:
2097 fnodes[f] = {}
2098 bundler.count[:] = [0, len(mfs)]
2099 return prune(mf, mfs)
2100 def getfiles():
2101 mfs.clear()
2102 return changedfiles
2103 def getfilenodes(fname, filerevlog):
2104 if fastpathlinkrev:
2105 ln, llr = filerevlog.node, filerevlog.linkrev
2106 def genfilenodes():
2107 for r in filerevlog:
2108 linkrev = llr(r)
2109 if linkrev not in commonrevs:
2110 yield filerevlog.node(r), cl.node(linkrev)
2111 fnodes[fname] = dict(genfilenodes())
2112 fstate[0] = fname
2113 fstate[1] = fnodes.pop(fname, {})
2114 return prune(filerevlog, fstate[1])
2115
2116 gengroup = bundler.generate(csets, getmfnodes, getfiles, getfilenodes,
2117 source)
2118 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2048 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2119
2049
2120 def changegroup(self, basenodes, source):
2050 def changegroup(self, basenodes, source):
2121 # to avoid a race we use changegroupsubset() (issue1320)
2051 # to avoid a race we use changegroupsubset() (issue1320)
2122 return self.changegroupsubset(basenodes, self.heads(), source)
2052 return self.changegroupsubset(basenodes, self.heads(), source)
2123
2053
2124 @unfilteredmethod
2054 @unfilteredmethod
2125 def addchangegroup(self, source, srctype, url, emptyok=False):
2055 def addchangegroup(self, source, srctype, url, emptyok=False):
2126 """Add the changegroup returned by source.read() to this repo.
2056 """Add the changegroup returned by source.read() to this repo.
2127 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2057 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2128 the URL of the repo where this changegroup is coming from.
2058 the URL of the repo where this changegroup is coming from.
2129
2059
2130 Return an integer summarizing the change to this repo:
2060 Return an integer summarizing the change to this repo:
2131 - nothing changed or no source: 0
2061 - nothing changed or no source: 0
2132 - more heads than before: 1+added heads (2..n)
2062 - more heads than before: 1+added heads (2..n)
2133 - fewer heads than before: -1-removed heads (-2..-n)
2063 - fewer heads than before: -1-removed heads (-2..-n)
2134 - number of heads stays the same: 1
2064 - number of heads stays the same: 1
2135 """
2065 """
2136 def csmap(x):
2066 def csmap(x):
2137 self.ui.debug("add changeset %s\n" % short(x))
2067 self.ui.debug("add changeset %s\n" % short(x))
2138 return len(cl)
2068 return len(cl)
2139
2069
2140 def revmap(x):
2070 def revmap(x):
2141 return cl.rev(x)
2071 return cl.rev(x)
2142
2072
2143 if not source:
2073 if not source:
2144 return 0
2074 return 0
2145
2075
2146 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2076 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2147
2077
2148 changesets = files = revisions = 0
2078 changesets = files = revisions = 0
2149 efiles = set()
2079 efiles = set()
2150
2080
2151 # write changelog data to temp files so concurrent readers will not
2081 # write changelog data to temp files so concurrent readers will not
2152 # see an inconsistent view
2082 # see an inconsistent view
2153 cl = self.changelog
2083 cl = self.changelog
2154 cl.delayupdate()
2084 cl.delayupdate()
2155 oldheads = cl.heads()
2085 oldheads = cl.heads()
2156
2086
2157 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2087 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2158 try:
2088 try:
2159 trp = weakref.proxy(tr)
2089 trp = weakref.proxy(tr)
2160 # pull off the changeset group
2090 # pull off the changeset group
2161 self.ui.status(_("adding changesets\n"))
2091 self.ui.status(_("adding changesets\n"))
2162 clstart = len(cl)
2092 clstart = len(cl)
2163 class prog(object):
2093 class prog(object):
2164 step = _('changesets')
2094 step = _('changesets')
2165 count = 1
2095 count = 1
2166 ui = self.ui
2096 ui = self.ui
2167 total = None
2097 total = None
2168 def __call__(self):
2098 def __call__(self):
2169 self.ui.progress(self.step, self.count, unit=_('chunks'),
2099 self.ui.progress(self.step, self.count, unit=_('chunks'),
2170 total=self.total)
2100 total=self.total)
2171 self.count += 1
2101 self.count += 1
2172 pr = prog()
2102 pr = prog()
2173 source.callback = pr
2103 source.callback = pr
2174
2104
2175 source.changelogheader()
2105 source.changelogheader()
2176 srccontent = cl.addgroup(source, csmap, trp)
2106 srccontent = cl.addgroup(source, csmap, trp)
2177 if not (srccontent or emptyok):
2107 if not (srccontent or emptyok):
2178 raise util.Abort(_("received changelog group is empty"))
2108 raise util.Abort(_("received changelog group is empty"))
2179 clend = len(cl)
2109 clend = len(cl)
2180 changesets = clend - clstart
2110 changesets = clend - clstart
2181 for c in xrange(clstart, clend):
2111 for c in xrange(clstart, clend):
2182 efiles.update(self[c].files())
2112 efiles.update(self[c].files())
2183 efiles = len(efiles)
2113 efiles = len(efiles)
2184 self.ui.progress(_('changesets'), None)
2114 self.ui.progress(_('changesets'), None)
2185
2115
2186 # pull off the manifest group
2116 # pull off the manifest group
2187 self.ui.status(_("adding manifests\n"))
2117 self.ui.status(_("adding manifests\n"))
2188 pr.step = _('manifests')
2118 pr.step = _('manifests')
2189 pr.count = 1
2119 pr.count = 1
2190 pr.total = changesets # manifests <= changesets
2120 pr.total = changesets # manifests <= changesets
2191 # no need to check for empty manifest group here:
2121 # no need to check for empty manifest group here:
2192 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2122 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2193 # no new manifest will be created and the manifest group will
2123 # no new manifest will be created and the manifest group will
2194 # be empty during the pull
2124 # be empty during the pull
2195 source.manifestheader()
2125 source.manifestheader()
2196 self.manifest.addgroup(source, revmap, trp)
2126 self.manifest.addgroup(source, revmap, trp)
2197 self.ui.progress(_('manifests'), None)
2127 self.ui.progress(_('manifests'), None)
2198
2128
2199 needfiles = {}
2129 needfiles = {}
2200 if self.ui.configbool('server', 'validate', default=False):
2130 if self.ui.configbool('server', 'validate', default=False):
2201 # validate incoming csets have their manifests
2131 # validate incoming csets have their manifests
2202 for cset in xrange(clstart, clend):
2132 for cset in xrange(clstart, clend):
2203 mfest = self.changelog.read(self.changelog.node(cset))[0]
2133 mfest = self.changelog.read(self.changelog.node(cset))[0]
2204 mfest = self.manifest.readdelta(mfest)
2134 mfest = self.manifest.readdelta(mfest)
2205 # store file nodes we must see
2135 # store file nodes we must see
2206 for f, n in mfest.iteritems():
2136 for f, n in mfest.iteritems():
2207 needfiles.setdefault(f, set()).add(n)
2137 needfiles.setdefault(f, set()).add(n)
2208
2138
2209 # process the files
2139 # process the files
2210 self.ui.status(_("adding file changes\n"))
2140 self.ui.status(_("adding file changes\n"))
2211 pr.step = _('files')
2141 pr.step = _('files')
2212 pr.count = 1
2142 pr.count = 1
2213 pr.total = efiles
2143 pr.total = efiles
2214 source.callback = None
2144 source.callback = None
2215
2145
2216 while True:
2146 while True:
2217 chunkdata = source.filelogheader()
2147 chunkdata = source.filelogheader()
2218 if not chunkdata:
2148 if not chunkdata:
2219 break
2149 break
2220 f = chunkdata["filename"]
2150 f = chunkdata["filename"]
2221 self.ui.debug("adding %s revisions\n" % f)
2151 self.ui.debug("adding %s revisions\n" % f)
2222 pr()
2152 pr()
2223 fl = self.file(f)
2153 fl = self.file(f)
2224 o = len(fl)
2154 o = len(fl)
2225 if not fl.addgroup(source, revmap, trp):
2155 if not fl.addgroup(source, revmap, trp):
2226 raise util.Abort(_("received file revlog group is empty"))
2156 raise util.Abort(_("received file revlog group is empty"))
2227 revisions += len(fl) - o
2157 revisions += len(fl) - o
2228 files += 1
2158 files += 1
2229 if f in needfiles:
2159 if f in needfiles:
2230 needs = needfiles[f]
2160 needs = needfiles[f]
2231 for new in xrange(o, len(fl)):
2161 for new in xrange(o, len(fl)):
2232 n = fl.node(new)
2162 n = fl.node(new)
2233 if n in needs:
2163 if n in needs:
2234 needs.remove(n)
2164 needs.remove(n)
2235 else:
2165 else:
2236 raise util.Abort(
2166 raise util.Abort(
2237 _("received spurious file revlog entry"))
2167 _("received spurious file revlog entry"))
2238 if not needs:
2168 if not needs:
2239 del needfiles[f]
2169 del needfiles[f]
2240 self.ui.progress(_('files'), None)
2170 self.ui.progress(_('files'), None)
2241
2171
2242 for f, needs in needfiles.iteritems():
2172 for f, needs in needfiles.iteritems():
2243 fl = self.file(f)
2173 fl = self.file(f)
2244 for n in needs:
2174 for n in needs:
2245 try:
2175 try:
2246 fl.rev(n)
2176 fl.rev(n)
2247 except error.LookupError:
2177 except error.LookupError:
2248 raise util.Abort(
2178 raise util.Abort(
2249 _('missing file data for %s:%s - run hg verify') %
2179 _('missing file data for %s:%s - run hg verify') %
2250 (f, hex(n)))
2180 (f, hex(n)))
2251
2181
2252 dh = 0
2182 dh = 0
2253 if oldheads:
2183 if oldheads:
2254 heads = cl.heads()
2184 heads = cl.heads()
2255 dh = len(heads) - len(oldheads)
2185 dh = len(heads) - len(oldheads)
2256 for h in heads:
2186 for h in heads:
2257 if h not in oldheads and self[h].closesbranch():
2187 if h not in oldheads and self[h].closesbranch():
2258 dh -= 1
2188 dh -= 1
2259 htext = ""
2189 htext = ""
2260 if dh:
2190 if dh:
2261 htext = _(" (%+d heads)") % dh
2191 htext = _(" (%+d heads)") % dh
2262
2192
2263 self.ui.status(_("added %d changesets"
2193 self.ui.status(_("added %d changesets"
2264 " with %d changes to %d files%s\n")
2194 " with %d changes to %d files%s\n")
2265 % (changesets, revisions, files, htext))
2195 % (changesets, revisions, files, htext))
2266 self.invalidatevolatilesets()
2196 self.invalidatevolatilesets()
2267
2197
2268 if changesets > 0:
2198 if changesets > 0:
2269 p = lambda: cl.writepending() and self.root or ""
2199 p = lambda: cl.writepending() and self.root or ""
2270 self.hook('pretxnchangegroup', throw=True,
2200 self.hook('pretxnchangegroup', throw=True,
2271 node=hex(cl.node(clstart)), source=srctype,
2201 node=hex(cl.node(clstart)), source=srctype,
2272 url=url, pending=p)
2202 url=url, pending=p)
2273
2203
2274 added = [cl.node(r) for r in xrange(clstart, clend)]
2204 added = [cl.node(r) for r in xrange(clstart, clend)]
2275 publishing = self.ui.configbool('phases', 'publish', True)
2205 publishing = self.ui.configbool('phases', 'publish', True)
2276 if srctype == 'push':
2206 if srctype == 'push':
2277 # Old servers can not push the boundary themselves.
2207 # Old servers can not push the boundary themselves.
2278 # New servers won't push the boundary if the changeset already
2208 # New servers won't push the boundary if the changeset already
2279 # existed locally as secret
2209 # existed locally as secret
2280 #
2210 #
2281 # We should not use added here but the list of all changes in
2211 # We should not use added here but the list of all changes in
2282 # the bundle
2212 # the bundle
2283 if publishing:
2213 if publishing:
2284 phases.advanceboundary(self, phases.public, srccontent)
2214 phases.advanceboundary(self, phases.public, srccontent)
2285 else:
2215 else:
2286 phases.advanceboundary(self, phases.draft, srccontent)
2216 phases.advanceboundary(self, phases.draft, srccontent)
2287 phases.retractboundary(self, phases.draft, added)
2217 phases.retractboundary(self, phases.draft, added)
2288 elif srctype != 'strip':
2218 elif srctype != 'strip':
2289 # publishing only alters behavior during push
2219 # publishing only alters behavior during push
2290 #
2220 #
2291 # strip should not touch boundary at all
2221 # strip should not touch boundary at all
2292 phases.retractboundary(self, phases.draft, added)
2222 phases.retractboundary(self, phases.draft, added)
2293
2223
2294 # make changelog see real files again
2224 # make changelog see real files again
2295 cl.finalize(trp)
2225 cl.finalize(trp)
2296
2226
2297 tr.close()
2227 tr.close()
2298
2228
2299 if changesets > 0:
2229 if changesets > 0:
2300 if srctype != 'strip':
2230 if srctype != 'strip':
2301 # During strip, the branchcache is invalid, but the coming call to
2231 # During strip, the branchcache is invalid, but the coming call to
2302 # `destroyed` will repair it.
2232 # `destroyed` will repair it.
2303 # In the other cases we can safely update the cache on disk.
2233 # In the other cases we can safely update the cache on disk.
2304 branchmap.updatecache(self.filtered('served'))
2234 branchmap.updatecache(self.filtered('served'))
2305 def runhooks():
2235 def runhooks():
2306 # forcefully update the on-disk branch cache
2236 # forcefully update the on-disk branch cache
2307 self.ui.debug("updating the branch cache\n")
2237 self.ui.debug("updating the branch cache\n")
2308 self.hook("changegroup", node=hex(cl.node(clstart)),
2238 self.hook("changegroup", node=hex(cl.node(clstart)),
2309 source=srctype, url=url)
2239 source=srctype, url=url)
2310
2240
2311 for n in added:
2241 for n in added:
2312 self.hook("incoming", node=hex(n), source=srctype,
2242 self.hook("incoming", node=hex(n), source=srctype,
2313 url=url)
2243 url=url)
2314
2244
2315 newheads = [h for h in self.heads() if h not in oldheads]
2245 newheads = [h for h in self.heads() if h not in oldheads]
2316 self.ui.log("incoming",
2246 self.ui.log("incoming",
2317 "%s incoming changes - new heads: %s\n",
2247 "%s incoming changes - new heads: %s\n",
2318 len(added),
2248 len(added),
2319 ', '.join([hex(c[:6]) for c in newheads]))
2249 ', '.join([hex(c[:6]) for c in newheads]))
2320 self._afterlock(runhooks)
2250 self._afterlock(runhooks)
2321
2251
        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
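
    # Illustrative note (not part of the original file): `dh` above is the
    # difference in head count, so the summary returned is 0 for "nothing
    # changed", 2..n for added heads, 1 for an unchanged number of heads,
    # and -2..-n for removed heads. A hypothetical caller:
    #
    #   modheads = repo.addchangegroup(gen, 'pull', remote.url())
    #   if modheads == 0:
    #       ui.status('no changes found\n')
    #   elif modheads > 1:
    #       ui.status('+%d heads\n' % (modheads - 1))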

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
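
    # Illustrative sketch (not part of the original file): the stream_out
    # wire format consumed above, as a hypothetical standalone reader over
    # any file-like object `fp`:
    #
    #   resp = int(fp.readline())   # 0 ok, 1 forbidden, 2 remote lock failed
    #   total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    #   for _ in xrange(total_files):
    #       name, size = fp.readline().split('\0', 1)  # NUL-separated header
    #       data = fp.read(int(size))   # raw store file contents for `name`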

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
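
    # Illustrative usage sketch (not part of the original file); the peer
    # setup is hypothetical:
    #
    #   remote = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #   repo.clone(remote, stream=True)   # streams when the server allows
    #                                     # it, otherwise falls back to pull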

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
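
    # Illustrative sketch (not part of the original file): pushkey and
    # listkeys expose flat key-value namespaces such as 'bookmarks' and
    # 'phases', e.g.:
    #
    #   marks = repo.listkeys('bookmarks')   # {bookmark name: hex node}
    #   repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))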

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
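
    # Illustrative usage sketch (not part of the original file): commands
    # can stash the message before a step that may fail, so the user can
    # recover it from .hg/last-message.txt:
    #
    #   msgfn = repo.savecommitmessage(message)
    #   ui.status('commit message saved in %s\n' % msgfn)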

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
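
# Illustrative sketch (not part of the original file): aftertrans builds the
# `after` callback handed to the transaction, renaming journal files to undo
# files once the transaction closes. The names below are hypothetical:
#
#   renames = [(vfs, 'journal', 'undo'),
#              (vfs, 'journal.phaseroots', 'undo.phaseroots')]
#   tr = transaction.transaction(ui.warn, opener, 'journal',
#                                aftertrans(renames))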

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
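
# Examples (not part of the original file):
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'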

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
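
# Illustrative note (not part of the original file): `instance` and `islocal`
# form the scheme-handler interface mercurial.hg uses to open repositories
# from a path or URL:
#
#   repo = instance(ui, '/path/to/repo', False)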