changelog: rely on transaction for finalization...
Pierre-Yves David
r23205:2d54aa53 default
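This is the changegroup.py half of the change: the explicit call to cl.finalize(trp) before tr.close() goes away because the changelog now registers its own finalization callback on the transaction when delayupdate(tr) is invoked (the changelog.py hunk below gains import weakref for that wiring). A minimal sketch of the finalizer pattern, assuming a transaction API with an addfinalize(category, callback) hook; the class and method names here are illustrative stand-ins modeled on Mercurial's API of this era, not the committed code:

class transaction(object):
    def __init__(self):
        self._finalizecallback = {}  # category -> callback

    def addfinalize(self, category, callback):
        # re-registering under the same category simply overwrites,
        # so repeated registration by one component is harmless
        self._finalizecallback[category] = callback

    def close(self):
        # run every registered finalizer, then commit for real
        for category in sorted(self._finalizecallback):
            self._finalizecallback[category](self)
        # ... write the journal, make the new data visible, etc.

class changelog(object):
    def delayupdate(self, tr):
        # hook our own finalization into the transaction instead of
        # relying on each caller to invoke cl.finalize(trp) by hand
        # (real code passes weakref proxies around to avoid cycles)
        tr.addfinalize('cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        # flush delayed index writes; "make changelog see real files again"
        print('changelog finalized')

tr = transaction()
cl = changelog()
cl.delayupdate(tr)
tr.close()  # cl._finalize runs automatically; no explicit call needed

Keying the callbacks by category makes registration idempotent, so delayupdate() can be called on the same transaction more than once without finalizing twice, and any code path that opens a transaction gets the finalization for free.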
diff --git a/mercurial/changegroup.py b/mercurial/changegroup.py
@@ -1,832 +1,830 @@
 # changegroup.py - Mercurial changegroup manipulation functions
 #
 # Copyright 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 import weakref
 from i18n import _
 from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
 import discovery, error, phases, branchmap
 
 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
 
 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if fewer were available'''
     s = stream.read(n)
     if len(s) < n:
         raise util.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
     return s
 
 def getchunk(stream):
     """return the next chunk from stream as a string"""
     d = readexactly(stream, 4)
     l = struct.unpack(">l", d)[0]
     if l <= 4:
         if l:
             raise util.Abort(_("invalid chunk length %d") % l)
         return ""
     return readexactly(stream, l - 4)
 
 def chunkheader(length):
     """return a changegroup chunk header (string)"""
     return struct.pack(">l", length + 4)
 
 def closechunk():
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)
 
 class nocompress(object):
     def compress(self, x):
         return x
     def flush(self):
         return ""
 
 bundletypes = {
     "": ("", nocompress), # only when using unbundle on ssh and old http servers
                           # since the unification, ssh accepts a header but there
                           # is no capability signaling it.
     "HG10UN": ("HG10UN", nocompress),
     "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
     "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
 }
 
 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
 
 def writebundle(cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.
 
     Existing files will not be overwritten.
     If no filename is specified, a temporary file is created.
     bz2 compression can be turned off.
     The bundle file will be deleted in case of errors.
     """
 
     fh = None
     cleanup = None
     try:
         if filename:
             if vfs:
                 fh = vfs.open(filename, "wb")
             else:
                 fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
         cleanup = filename
 
         header, compressor = bundletypes[bundletype]
         fh.write(header)
         z = compressor()
 
         # parse the changegroup data, otherwise we will block
         # in case of sshrepo because we don't know the end of the stream
 
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         for chunk in cg.getchunks():
             fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
     finally:
         if fh is not None:
             fh.close()
         if cleanup is not None:
             if filename and vfs:
                 vfs.unlink(cleanup)
             else:
                 os.unlink(cleanup)
 
 def decompressor(fh, alg):
     if alg == 'UN':
         return fh
     elif alg == 'GZ':
         def generator(f):
             zd = zlib.decompressobj()
             for chunk in util.filechunkiter(f):
                 yield zd.decompress(chunk)
     elif alg == 'BZ':
         def generator(f):
             zd = bz2.BZ2Decompressor()
             zd.decompress("BZ")
             for chunk in util.filechunkiter(f, 4096):
                 yield zd.decompress(chunk)
     else:
         raise util.Abort("unknown bundle compression '%s'" % alg)
     return util.chunkbuffer(generator(fh))
 
 class cg1unpacker(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     def __init__(self, fh, alg):
         self._stream = decompressor(fh, alg)
         self._type = alg
         self.callback = None
     def compressed(self):
         return self._type != 'UN'
     def read(self, l):
         return self._stream.read(l)
     def seek(self, pos):
         return self._stream.seek(pos)
     def tell(self):
         return self._stream.tell()
     def close(self):
         return self._stream.close()
 
     def chunklength(self):
         d = readexactly(self._stream, 4)
         l = struct.unpack(">l", d)[0]
         if l <= 4:
             if l:
                 raise util.Abort(_("invalid chunk length %d") % l)
             return 0
         if self.callback:
             self.callback()
         return l - 4
 
     def changelogheader(self):
         """v10 does not have a changelog header chunk"""
         return {}
 
     def manifestheader(self):
         """v10 does not have a manifest header chunk"""
         return {}
 
     def filelogheader(self):
         """return the header of the filelogs chunk, v10 only has the filename"""
         l = self.chunklength()
         if not l:
             return {}
         fname = readexactly(self._stream, l)
         return {'filename': fname}
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
         if prevnode is None:
             deltabase = p1
         else:
             deltabase = prevnode
         return node, p1, p2, deltabase, cs
 
     def deltachunk(self, prevnode):
         l = self.chunklength()
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
         return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                 'deltabase': deltabase, 'delta': delta}
 
     def getchunks(self):
         """return all the chunks contained in the bundle
 
         Used when you need to forward the binary stream to a file or another
         network API. To do so, it parses the changegroup data; otherwise it
         would block on an sshrepo because it doesn't know where the stream
         ends.
         """
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         empty = False
         count = 0
         while not empty or count <= 2:
             empty = True
             count += 1
             while True:
                 chunk = getchunk(self)
                 if not chunk:
                     break
                 empty = False
                 yield chunkheader(len(chunk))
                 pos = 0
                 while pos < len(chunk):
                     next = pos + 2**20
                     yield chunk[pos:next]
                     pos = next
             yield closechunk()
 
 class cg2unpacker(cg1unpacker):
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, deltabase, cs = headertuple
         return node, p1, p2, deltabase, cs
 
 class headerlessfixup(object):
     def __init__(self, fh, h):
         self._h = h
         self._fh = fh
     def read(self, n):
         if self._h:
             d, self._h = self._h[:n], self._h[n:]
             if len(d) < n:
                 d += readexactly(self._fh, n - len(d))
             return d
         return readexactly(self._fh, n)
 
 class cg1packer(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     def __init__(self, repo, bundlecaps=None):
         """Given a source repo, construct a bundler.
 
         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle.
         """
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
         self._changelog = repo.changelog
         self._manifest = repo.manifest
         reorder = repo.ui.config('bundle', 'reorder', 'auto')
         if reorder == 'auto':
             reorder = None
         else:
             reorder = util.parsebool(reorder)
         self._repo = repo
         self._reorder = reorder
         self._progress = repo.ui.progress
     def close(self):
         return closechunk()
 
     def fileheader(self, fname):
         return chunkheader(len(fname)) + fname
 
     def group(self, nodelist, revlog, lookup, units=None, reorder=None):
         """Calculate a delta group, yielding a sequence of changegroup chunks
         (strings).
 
         Given a list of changeset revs, return a set of deltas and
         metadata corresponding to nodes. The first delta is
         first parent(nodelist[0]) -> nodelist[0], and the receiver is
         guaranteed to have this parent as it has all history before
         these changesets. In the case firstparent is nullrev the
         changegroup starts with a full revision.
 
         If units is not None, progress detail will be generated; units
         specifies the type of revlog that is touched (changelog, manifest,
         etc.).
         """
         # if we don't have any revisions touched by these changesets, bail
         if len(nodelist) == 0:
             yield self.close()
             return
 
         # for generaldelta revlogs, we linearize the revs; this will both be
         # much quicker and generate a much smaller bundle
         if (revlog._generaldelta and reorder is not False) or reorder:
             dag = dagutil.revlogdag(revlog)
             revs = set(revlog.rev(n) for n in nodelist)
             revs = dag.linearize(revs)
         else:
             revs = sorted([revlog.rev(n) for n in nodelist])
 
         # add the parent of the first rev
         p = revlog.parentrevs(revs[0])[0]
         revs.insert(0, p)
 
         # build deltas
         total = len(revs) - 1
         msgbundling = _('bundling')
         for r in xrange(len(revs) - 1):
             if units is not None:
                 self._progress(msgbundling, r + 1, unit=units, total=total)
             prev, curr = revs[r], revs[r + 1]
             linknode = lookup(revlog.node(curr))
             for c in self.revchunk(revlog, curr, prev, linknode):
                 yield c
 
         yield self.close()
 
     # filter any nodes that claim to be part of the known set
     def prune(self, revlog, missing, commonrevs, source):
         rr, rl = revlog.rev, revlog.linkrev
         return [n for n in missing if rl(rr(n)) not in commonrevs]
 
     def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
         '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = self._changelog
         mf = self._manifest
         reorder = self._reorder
         progress = self._progress
 
         # for progress output
         msgbundling = _('bundling')
 
         mfs = {} # needed manifests
         fnodes = {} # needed file nodes
         changedfiles = set()
 
         # Callback for the changelog, used to collect changed files and
         # manifest nodes.
         # Returns the linkrev node (identity in the changelog case).
         def lookupcl(x):
             c = cl.read(x)
             changedfiles.update(c[3])
             # record the first changeset introducing this manifest version
             mfs.setdefault(c[0], x)
             return x
 
         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
         def lookupmf(x):
             clnode = mfs[x]
             if not fastpathlinkrev:
                 mdata = mf.readfast(x)
                 for f, n in mdata.iteritems():
                     if f in changedfiles:
                         # record the first changeset introducing this filelog
                         # version
                         fnodes[f].setdefault(n, clnode)
             return clnode
 
         for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)
 
         for f in changedfiles:
             fnodes[f] = {}
         mfnodes = self.prune(mf, mfs, commonrevs, source)
         for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)
 
         mfs.clear()
         needed = set(cl.rev(x) for x in clnodes)
 
         def linknodes(filerevlog, fname):
             if fastpathlinkrev:
                 llr = filerevlog.linkrev
                 def genfilenodes():
                     for r in filerevlog:
                         linkrev = llr(r)
                         if linkrev in needed:
                             yield filerevlog.node(r), cl.node(linkrev)
                 fnodes[fname] = dict(genfilenodes())
             return fnodes.get(fname, {})
 
         for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                         source):
             yield chunk
 
         yield self.close()
         progress(msgbundling, None)
 
         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)
 
     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
         progress = self._progress
         reorder = self._reorder
         msgbundling = _('bundling')
 
         total = len(changedfiles)
         # for progress output
         msgfiles = _('files')
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
                 raise util.Abort(_("empty or missing revlog for %s") % fname)
 
             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes; we collected the linkrev nodes above in
             # the fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]
 
             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
             if filenodes:
                 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                          total=total)
                 yield self.fileheader(fname)
                 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                         reorder=reorder):
                     yield chunk
 
     def deltaparent(self, revlog, rev, p1, p2, prev):
         return prev
 
     def revchunk(self, revlog, rev, prev, linknode):
         node = revlog.node(rev)
         p1, p2 = revlog.parentrevs(rev)
         base = self.deltaparent(revlog, rev, p1, p2, prev)
 
         prefix = ''
         if base == nullrev:
             delta = revlog.revision(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = revlog.revdiff(base, rev)
         p1n, p2n = revlog.parents(node)
         basenode = revlog.node(base)
         meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
         meta += prefix
         l = len(meta) + len(delta)
         yield chunkheader(l)
         yield meta
         yield delta
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
 
 class cg2packer(cg1packer):
 
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
 
     def group(self, nodelist, revlog, lookup, units=None, reorder=None):
         if (revlog._generaldelta and reorder is not True):
             reorder = False
         return cg1packer.group(self, nodelist, revlog, lookup,
                                units=units, reorder=reorder)
 
     def deltaparent(self, revlog, rev, p1, p2, prev):
         dp = revlog.deltaparent(rev)
         # avoid storing full revisions; pick prev in those cases
         # also pick prev when we can't be sure remote has dp
         if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
             return prev
         return dp
 
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
 
 packermap = {'01': (cg1packer, cg1unpacker),
              '02': (cg2packer, cg2unpacker)}
 
 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
         repo.ui.status(_("%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
         repo.ui.debug("list of changesets:\n")
         for node in nodes:
             repo.ui.debug("%s\n" % hex(node))
 
 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
     heads = outgoing.missingheads
     # We go through the fast path if we get told to, or if all (unfiltered)
     # heads have been requested (since we then know that all linkrevs will
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
         repo.filtername is None and heads == sorted(repo.heads()))
 
     repo.hook('preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
 
 def getsubset(repo, outgoing, bundler, source, fastpath=False):
     gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
     return cg1unpacker(util.chunkbuffer(gengroup), 'UN')
 
 def changegroupsubset(repo, roots, heads, source):
     """Compute a changegroup consisting of all the nodes that are
     descendants of any of the roots and ancestors of any of the heads.
     Return a chunkbuffer object whose read() method will return
     successive changegroup chunks.
 
     It is fairly complex as determining which filenodes and which
     manifest nodes need to be included for the changeset to be complete
     is non-trivial.
 
     Another wrinkle is doing the reverse, figuring out which changeset in
     the changegroup a particular filenode or manifestnode belongs to.
     """
     cl = repo.changelog
     if not roots:
         roots = [nullid]
     # TODO: remove call to nodesbetween.
     csets, roots, heads = cl.nodesbetween(roots, heads)
     discbases = []
     for n in roots:
         discbases.extend([p for p in cl.parents(n) if p != nullid])
     outgoing = discovery.outgoing(cl, discbases, heads)
     bundler = cg1packer(repo)
     return getsubset(repo, outgoing, bundler, source)
 
 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                            version='01'):
     """Like getbundle, but taking a discovery.outgoing as an argument.
 
     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing. Returns a raw changegroup generator."""
     if not outgoing.missing:
         return None
     bundler = packermap[version][0](repo, bundlecaps)
     return getsubsetraw(repo, outgoing, bundler, source)
 
 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
     """Like getbundle, but taking a discovery.outgoing as an argument.
 
     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing."""
     if not outgoing.missing:
         return None
     bundler = cg1packer(repo, bundlecaps)
     return getsubset(repo, outgoing, bundler, source)
 
 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
     and a set of heads.
 
     This is a separate function so extensions can have access to
     the logic.
 
     Returns a discovery.outgoing object.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(cl, common, heads)
 
 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
                       version='01'):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors of common.
 
     If heads is None, use the local heads. If common is None, use [nullid].
 
     If version is None, use a version '1' changegroup.
 
     The nodes in common might not all be known locally due to the way the
     current discovery protocol works. Returns a raw changegroup generator.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
                                   version=version)
 
 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors of common.
 
     If heads is None, use the local heads. If common is None, use [nullid].
 
     The nodes in common might not all be known locally due to the way the
     current discovery protocol works.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
 
 def changegroup(repo, basenodes, source):
     # to avoid a race we use changegroupsubset() (issue1320)
     return changegroupsubset(repo, basenodes, repo.heads(), source)
 
 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
     revisions = 0
     files = 0
     while True:
         chunkdata = source.filelogheader()
         if not chunkdata:
             break
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
         pr()
         fl = repo.file(f)
         o = len(fl)
         if not fl.addgroup(source, revmap, trp):
             raise util.Abort(_("received file revlog group is empty"))
         revisions += len(fl) - o
         files += 1
         if f in needfiles:
             needs = needfiles[f]
             for new in xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
                 else:
                     raise util.Abort(
                         _("received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     repo.ui.progress(_('files'), None)
 
     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise util.Abort(
                     _('missing file data for %s:%s - run hg verify') %
                     (f, hex(n)))
 
     return revisions, files
 
 def addchangegroup(repo, source, srctype, url, emptyok=False,
                    targetphase=phases.draft):
     """Add the changegroup returned by source.read() to this repo.
     srctype is a string like 'push', 'pull', or 'unbundle'. url is
     the URL of the repo where this changegroup is coming from.
 
     Return an integer summarizing the change to this repo:
     - nothing changed or no source: 0
     - more heads than before: 1+added heads (2..n)
     - fewer heads than before: -1-removed heads (-2..-n)
     - number of heads stays the same: 1
     """
     repo = repo.unfiltered()
     def csmap(x):
         repo.ui.debug("add changeset %s\n" % short(x))
         return len(cl)
 
     def revmap(x):
         return cl.rev(x)
 
     if not source:
         return 0
 
     changesets = files = revisions = 0
     efiles = set()
 
     tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
     # The transaction could have been created before and already carries
     # source information. In this case we use the top level data. We
     # overwrite the arguments because we need to use the top level values
     # (if they exist) in this function.
     srctype = tr.hookargs.setdefault('source', srctype)
     url = tr.hookargs.setdefault('url', url)
 
     # write changelog data to temp files so concurrent readers will not see
     # an inconsistent view
     cl = repo.changelog
     cl.delayupdate(tr)
     oldheads = cl.heads()
     try:
         repo.hook('prechangegroup', throw=True, **tr.hookargs)
 
         trp = weakref.proxy(tr)
         # pull off the changeset group
         repo.ui.status(_("adding changesets\n"))
         clstart = len(cl)
         class prog(object):
             step = _('changesets')
             count = 1
             ui = repo.ui
             total = None
             def __call__(repo):
                 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                  total=repo.total)
                 repo.count += 1
         pr = prog()
         source.callback = pr
 
         source.changelogheader()
         srccontent = cl.addgroup(source, csmap, trp)
         if not (srccontent or emptyok):
             raise util.Abort(_("received changelog group is empty"))
         clend = len(cl)
         changesets = clend - clstart
         for c in xrange(clstart, clend):
             efiles.update(repo[c].files())
         efiles = len(efiles)
         repo.ui.progress(_('changesets'), None)
 
         # pull off the manifest group
         repo.ui.status(_("adding manifests\n"))
         pr.step = _('manifests')
         pr.count = 1
         pr.total = changesets # manifests <= changesets
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         source.manifestheader()
         repo.manifest.addgroup(source, revmap, trp)
         repo.ui.progress(_('manifests'), None)
 
         needfiles = {}
         if repo.ui.configbool('server', 'validate', default=False):
             # validate incoming csets have their manifests
             for cset in xrange(clstart, clend):
                 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                 mfest = repo.manifest.readdelta(mfest)
                 # store file nodes we must see
                 for f, n in mfest.iteritems():
                     needfiles.setdefault(f, set()).add(n)
 
         # process the files
         repo.ui.status(_("adding file changes\n"))
         pr.step = _('files')
         pr.count = 1
         pr.total = efiles
         source.callback = None
 
         newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                 needfiles)
         revisions += newrevs
         files += newfiles
 
         dh = 0
         if oldheads:
             heads = cl.heads()
             dh = len(heads) - len(oldheads)
             for h in heads:
                 if h not in oldheads and repo[h].closesbranch():
                     dh -= 1
         htext = ""
         if dh:
             htext = _(" (%+d heads)") % dh
 
         repo.ui.status(_("added %d changesets"
                          " with %d changes to %d files%s\n")
                        % (changesets, revisions, files, htext))
         repo.invalidatevolatilesets()
 
         if changesets > 0:
             p = lambda: tr.writepending() and repo.root or ""
             if 'node' not in tr.hookargs:
                 tr.hookargs['node'] = hex(cl.node(clstart))
                 hookargs = dict(tr.hookargs)
             else:
                 hookargs = dict(tr.hookargs)
                 hookargs['node'] = hex(cl.node(clstart))
             repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
 
         added = [cl.node(r) for r in xrange(clstart, clend)]
         publishing = repo.ui.configbool('phases', 'publish', True)
         if srctype in ('push', 'serve'):
             # Old servers can not push the boundary themselves.
             # New servers won't push the boundary if changeset already
             # exists locally as secret
             #
             # We should not use added here but the list of all changes in
             # the bundle
             if publishing:
                 phases.advanceboundary(repo, tr, phases.public, srccontent)
             else:
                 # Those changesets have been pushed from the outside, their
                 # phases are going to be pushed alongside. Therefore
                 # `targetphase` is ignored.
                 phases.advanceboundary(repo, tr, phases.draft, srccontent)
                 phases.retractboundary(repo, tr, phases.draft, added)
         elif srctype != 'strip':
             # publishing only alters behavior during push
             #
             # strip should not touch the boundary at all
             phases.retractboundary(repo, tr, targetphase, added)
 
-        # make changelog see real files again
-        cl.finalize(trp)
 
         tr.close()
 
         if changesets > 0:
             if srctype != 'strip':
                 # During strip, the branchcache is invalid but the coming call
                 # to `destroyed` will repair it.
                 # In other cases we can safely update the cache on disk.
                 branchmap.updatecache(repo.filtered('served'))
 
             def runhooks():
                 # These hooks run when the lock releases, not when the
                 # transaction closes. So it's possible for the changelog
                 # to have changed since we last saw it.
                 if clstart >= len(repo):
                     return
 
                 # forcefully update the on-disk branch cache
                 repo.ui.debug("updating the branch cache\n")
                 repo.hook("changegroup", **hookargs)
 
                 for n in added:
                     args = hookargs.copy()
                     args['node'] = hex(n)
                     repo.hook("incoming", **args)
 
                 newheads = [h for h in repo.heads() if h not in oldheads]
                 repo.ui.log("incoming",
                             "%s incoming changes - new heads: %s\n",
                             len(added),
                             ', '.join([hex(c[:6]) for c in newheads]))
             repo._afterlock(runhooks)
 
     finally:
         tr.release()
     # never return 0 here:
     if dh < 0:
         return dh - 1
     else:
         return dh + 1
@@ -1,375 +1,378 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import weakref
8 from node import bin, hex, nullid
9 from node import bin, hex, nullid
9 from i18n import _
10 from i18n import _
10 import util, error, revlog, encoding
11 import util, error, revlog, encoding
11
12
12 _defaultextra = {'branch': 'default'}
13 _defaultextra = {'branch': 'default'}
13
14
14 def _string_escape(text):
15 def _string_escape(text):
15 """
16 """
16 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
17 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
17 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
18 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
18 >>> s
19 >>> s
19 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
20 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
20 >>> res = _string_escape(s)
21 >>> res = _string_escape(s)
21 >>> s == res.decode('string_escape')
22 >>> s == res.decode('string_escape')
22 True
23 True
23 """
24 """
24 # subset of the string_escape codec
25 # subset of the string_escape codec
25 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
26 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
26 return text.replace('\0', '\\0')
27 return text.replace('\0', '\\0')
27
28
28 def decodeextra(text):
29 def decodeextra(text):
29 """
30 """
30 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
31 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
31 ... ).iteritems())
32 ... ).iteritems())
32 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
33 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
33 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
34 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
34 ... 'baz': chr(92) + chr(0) + '2'})
35 ... 'baz': chr(92) + chr(0) + '2'})
35 ... ).iteritems())
36 ... ).iteritems())
36 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
37 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
37 """
38 """
38 extra = _defaultextra.copy()
39 extra = _defaultextra.copy()
39 for l in text.split('\0'):
40 for l in text.split('\0'):
40 if l:
41 if l:
41 if '\\0' in l:
42 if '\\0' in l:
42 # fix up \0 without getting into trouble with \\0
43 # fix up \0 without getting into trouble with \\0
43 l = l.replace('\\\\', '\\\\\n')
44 l = l.replace('\\\\', '\\\\\n')
44 l = l.replace('\\0', '\0')
45 l = l.replace('\\0', '\0')
45 l = l.replace('\n', '')
46 l = l.replace('\n', '')
46 k, v = l.decode('string_escape').split(':', 1)
47 k, v = l.decode('string_escape').split(':', 1)
47 extra[k] = v
48 extra[k] = v
48 return extra
49 return extra
49
50
50 def encodeextra(d):
51 def encodeextra(d):
51 # keys must be sorted to produce a deterministic changelog entry
52 # keys must be sorted to produce a deterministic changelog entry
52 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
53 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
53 return "\0".join(items)
54 return "\0".join(items)
54
55
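Together, _string_escape, encodeextra and decodeextra form a reversible encoding for the extra dict. A minimal round-trip sketch (Python 2, like the rest of this module; the dict contents are made up):

    extra = {'branch': 'stable', 'source': 'abc\ndef'}
    blob = encodeextra(extra)
    # blob == 'branch:stable\x00source:abc\\ndef'
    # (keys sorted, values escaped, entries '\0'-joined)
    assert decodeextra(blob) == extra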
55 def stripdesc(desc):
56 def stripdesc(desc):
56 """strip trailing whitespace and leading and trailing empty lines"""
57 """strip trailing whitespace and leading and trailing empty lines"""
57 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
58 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
58
59
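Since this module documents behaviour with doctests, a doctest-style illustration of stripdesc (not part of the original source):

    >>> stripdesc('\n\nfix bug   \nsecond line  \n\n\n')
    'fix bug\nsecond line'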
59 class appender(object):
60 class appender(object):
60 '''the changelog index must be updated last on disk, so we use this class
61 '''the changelog index must be updated last on disk, so we use this class
61 to delay writes to it'''
62 to delay writes to it'''
62 def __init__(self, vfs, name, mode, buf):
63 def __init__(self, vfs, name, mode, buf):
63 self.data = buf
64 self.data = buf
64 fp = vfs(name, mode)
65 fp = vfs(name, mode)
65 self.fp = fp
66 self.fp = fp
66 self.offset = fp.tell()
67 self.offset = fp.tell()
67 self.size = vfs.fstat(fp).st_size
68 self.size = vfs.fstat(fp).st_size
68
69
69 def end(self):
70 def end(self):
70 return self.size + len("".join(self.data))
71 return self.size + len("".join(self.data))
71 def tell(self):
72 def tell(self):
72 return self.offset
73 return self.offset
73 def flush(self):
74 def flush(self):
74 pass
75 pass
75 def close(self):
76 def close(self):
76 self.fp.close()
77 self.fp.close()
77
78
78 def seek(self, offset, whence=0):
79 def seek(self, offset, whence=0):
79 '''virtual file offset spans real file and data'''
80 '''virtual file offset spans real file and data'''
80 if whence == 0:
81 if whence == 0:
81 self.offset = offset
82 self.offset = offset
82 elif whence == 1:
83 elif whence == 1:
83 self.offset += offset
84 self.offset += offset
84 elif whence == 2:
85 elif whence == 2:
85 self.offset = self.end() + offset
86 self.offset = self.end() + offset
86 if self.offset < self.size:
87 if self.offset < self.size:
87 self.fp.seek(self.offset)
88 self.fp.seek(self.offset)
88
89
89 def read(self, count=-1):
90 def read(self, count=-1):
90 '''only trick here is reads that span real file and data'''
91 '''only trick here is reads that span real file and data'''
91 ret = ""
92 ret = ""
92 if self.offset < self.size:
93 if self.offset < self.size:
93 s = self.fp.read(count)
94 s = self.fp.read(count)
94 ret = s
95 ret = s
95 self.offset += len(s)
96 self.offset += len(s)
96 if count > 0:
97 if count > 0:
97 count -= len(s)
98 count -= len(s)
98 if count != 0:
99 if count != 0:
99 doff = self.offset - self.size
100 doff = self.offset - self.size
100 self.data.insert(0, "".join(self.data))
101 self.data.insert(0, "".join(self.data))
101 del self.data[1:]
102 del self.data[1:]
102 s = self.data[0][doff:doff + count]
103 s = self.data[0][doff:doff + count]
103 self.offset += len(s)
104 self.offset += len(s)
104 ret += s
105 ret += s
105 return ret
106 return ret
106
107
107 def write(self, s):
108 def write(self, s):
108 self.data.append(str(s))
109 self.data.append(str(s))
109 self.offset += len(s)
110 self.offset += len(s)
110
111
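appender makes a real file plus an in-memory buffer look like a single seekable file: writes always land in the buffer, while reads transparently cross from on-disk bytes into buffered bytes. A small usage sketch (the vfs and file name are stand-ins; callers read with explicit counts, as revlog does):

    buf = []
    f = appender(repo.svfs, '00changelog.i', 'a+b', buf)
    f.write('new index entry')     # buffered only, not yet on disk
    f.seek(0)
    data = f.read(4096)            # on-disk bytes followed by buffered ones
    # concurrent readers of '00changelog.i' still see only the old data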
111 def _divertopener(opener, target):
112 def _divertopener(opener, target):
112 """build an opener that writes in 'target.a' instead of 'target'"""
113 """build an opener that writes in 'target.a' instead of 'target'"""
113 def _divert(name, mode='r'):
114 def _divert(name, mode='r'):
114 if name != target:
115 if name != target:
115 return opener(name, mode)
116 return opener(name, mode)
116 return opener(name + ".a", mode)
117 return opener(name + ".a", mode)
117 return _divert
118 return _divert
118
119
119 def _delayopener(opener, target, buf):
120 def _delayopener(opener, target, buf):
120 """build an opener that stores chunks in 'buf' instead of 'target'"""
121 """build an opener that stores chunks in 'buf' instead of 'target'"""
121 def _delay(name, mode='r'):
122 def _delay(name, mode='r'):
122 if name != target:
123 if name != target:
123 return opener(name, mode)
124 return opener(name, mode)
124 return appender(opener, name, mode, buf)
125 return appender(opener, name, mode, buf)
125 return _delay
126 return _delay
126
127
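These two factories are the redirection strategies delayupdate() chooses between below: _divertopener sends index writes to a sibling '<name>.a' file that can later be renamed into place, while _delayopener captures writes in a list. A hedged sketch of wrapping a store opener:

    buf = []
    delayed = _delayopener(repo.svfs, '00changelog.i', buf)
    fp = delayed('00changelog.i', 'a+b')   # an appender writing into buf
    fp = delayed('00manifest.i', 'a+b')    # any other name: the real opener

    diverted = _divertopener(repo.svfs, '00changelog.i')
    fp = diverted('00changelog.i', 'w')    # actually opens '00changelog.i.a'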
127 class changelog(revlog.revlog):
128 class changelog(revlog.revlog):
128 def __init__(self, opener):
129 def __init__(self, opener):
129 revlog.revlog.__init__(self, opener, "00changelog.i")
130 revlog.revlog.__init__(self, opener, "00changelog.i")
130 if self._initempty:
131 if self._initempty:
131 # changelogs don't benefit from generaldelta
132 # changelogs don't benefit from generaldelta
132 self.version &= ~revlog.REVLOGGENERALDELTA
133 self.version &= ~revlog.REVLOGGENERALDELTA
133 self._generaldelta = False
134 self._generaldelta = False
134 self._realopener = opener
135 self._realopener = opener
135 self._delayed = False
136 self._delayed = False
136 self._delaybuf = None
137 self._delaybuf = None
137 self._divert = False
138 self._divert = False
138 self.filteredrevs = frozenset()
139 self.filteredrevs = frozenset()
139
140
140 def tip(self):
141 def tip(self):
141 """filtered version of revlog.tip"""
142 """filtered version of revlog.tip"""
142 for i in xrange(len(self) - 1, -2, -1):
143 for i in xrange(len(self) - 1, -2, -1):
143 if i not in self.filteredrevs:
144 if i not in self.filteredrevs:
144 return self.node(i)
145 return self.node(i)
145
146
146 def __iter__(self):
147 def __iter__(self):
147 """filtered version of revlog.__iter__"""
148 """filtered version of revlog.__iter__"""
148 if len(self.filteredrevs) == 0:
149 if len(self.filteredrevs) == 0:
149 return revlog.revlog.__iter__(self)
150 return revlog.revlog.__iter__(self)
150
151
151 def filterediter():
152 def filterediter():
152 for i in xrange(len(self)):
153 for i in xrange(len(self)):
153 if i not in self.filteredrevs:
154 if i not in self.filteredrevs:
154 yield i
155 yield i
155
156
156 return filterediter()
157 return filterediter()
157
158
158 def revs(self, start=0, stop=None):
159 def revs(self, start=0, stop=None):
159 """filtered version of revlog.revs"""
160 """filtered version of revlog.revs"""
160 for i in super(changelog, self).revs(start, stop):
161 for i in super(changelog, self).revs(start, stop):
161 if i not in self.filteredrevs:
162 if i not in self.filteredrevs:
162 yield i
163 yield i
163
164
164 @util.propertycache
165 @util.propertycache
165 def nodemap(self):
166 def nodemap(self):
166 # XXX need filtering too
167 # XXX need filtering too
167 self.rev(self.node(0))
168 self.rev(self.node(0))
168 return self._nodecache
169 return self._nodecache
169
170
170 def hasnode(self, node):
171 def hasnode(self, node):
171 """filtered version of revlog.hasnode"""
172 """filtered version of revlog.hasnode"""
172 try:
173 try:
173 i = self.rev(node)
174 i = self.rev(node)
174 return i not in self.filteredrevs
175 return i not in self.filteredrevs
175 except KeyError:
176 except KeyError:
176 return False
177 return False
177
178
178 def headrevs(self):
179 def headrevs(self):
179 if self.filteredrevs:
180 if self.filteredrevs:
180 try:
181 try:
181 return self.index.headrevsfiltered(self.filteredrevs)
182 return self.index.headrevsfiltered(self.filteredrevs)
182 # AttributeError covers non-c-extension environments and
183 # AttributeError covers non-c-extension environments and
183 # old c extensions without filter handling.
184 # old c extensions without filter handling.
184 except AttributeError:
185 except AttributeError:
185 return self._headrevs()
186 return self._headrevs()
186
187
187 return super(changelog, self).headrevs()
188 return super(changelog, self).headrevs()
188
189
189 def strip(self, *args, **kwargs):
190 def strip(self, *args, **kwargs):
190 # XXX make something better than assert
191 # XXX make something better than assert
191 # We can't expect proper strip behavior if we are filtered.
192 # We can't expect proper strip behavior if we are filtered.
192 assert not self.filteredrevs
193 assert not self.filteredrevs
193 super(changelog, self).strip(*args, **kwargs)
194 super(changelog, self).strip(*args, **kwargs)
194
195
195 def rev(self, node):
196 def rev(self, node):
196 """filtered version of revlog.rev"""
197 """filtered version of revlog.rev"""
197 r = super(changelog, self).rev(node)
198 r = super(changelog, self).rev(node)
198 if r in self.filteredrevs:
199 if r in self.filteredrevs:
199 raise error.FilteredLookupError(hex(node), self.indexfile,
200 raise error.FilteredLookupError(hex(node), self.indexfile,
200 _('filtered node'))
201 _('filtered node'))
201 return r
202 return r
202
203
203 def node(self, rev):
204 def node(self, rev):
204 """filtered version of revlog.node"""
205 """filtered version of revlog.node"""
205 if rev in self.filteredrevs:
206 if rev in self.filteredrevs:
206 raise error.FilteredIndexError(rev)
207 raise error.FilteredIndexError(rev)
207 return super(changelog, self).node(rev)
208 return super(changelog, self).node(rev)
208
209
209 def linkrev(self, rev):
210 def linkrev(self, rev):
210 """filtered version of revlog.linkrev"""
211 """filtered version of revlog.linkrev"""
211 if rev in self.filteredrevs:
212 if rev in self.filteredrevs:
212 raise error.FilteredIndexError(rev)
213 raise error.FilteredIndexError(rev)
213 return super(changelog, self).linkrev(rev)
214 return super(changelog, self).linkrev(rev)
214
215
215 def parentrevs(self, rev):
216 def parentrevs(self, rev):
216 """filtered version of revlog.parentrevs"""
217 """filtered version of revlog.parentrevs"""
217 if rev in self.filteredrevs:
218 if rev in self.filteredrevs:
218 raise error.FilteredIndexError(rev)
219 raise error.FilteredIndexError(rev)
219 return super(changelog, self).parentrevs(rev)
220 return super(changelog, self).parentrevs(rev)
220
221
221 def flags(self, rev):
222 def flags(self, rev):
222 """filtered version of revlog.flags"""
223 """filtered version of revlog.flags"""
223 if rev in self.filteredrevs:
224 if rev in self.filteredrevs:
224 raise error.FilteredIndexError(rev)
225 raise error.FilteredIndexError(rev)
225 return super(changelog, self).flags(rev)
226 return super(changelog, self).flags(rev)
226
227
227 def delayupdate(self, tr):
228 def delayupdate(self, tr):
228 "delay visibility of index updates to other readers"
229 "delay visibility of index updates to other readers"
229
230
230 if not self._delayed:
231 if not self._delayed:
231 if len(self) == 0:
232 if len(self) == 0:
232 self._divert = True
233 self._divert = True
233 if self._realopener.exists(self.indexfile + '.a'):
234 if self._realopener.exists(self.indexfile + '.a'):
234 self._realopener.unlink(self.indexfile + '.a')
235 self._realopener.unlink(self.indexfile + '.a')
235 self.opener = _divertopener(self._realopener, self.indexfile)
236 self.opener = _divertopener(self._realopener, self.indexfile)
236 else:
237 else:
237 self._delaybuf = []
238 self._delaybuf = []
238 self.opener = _delayopener(self._realopener, self.indexfile,
239 self.opener = _delayopener(self._realopener, self.indexfile,
239 self._delaybuf)
240 self._delaybuf)
240 self._delayed = True
241 self._delayed = True
241 tr.addpending('cl-%i' % id(self), self._writepending)
242 tr.addpending('cl-%i' % id(self), self._writepending)
243 trp = weakref.proxy(tr)
244 tr.addfinalize('cl-%i' % id(self), lambda: self._finalize(trp))
242
245
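This registration is the core of the change: instead of requiring callers to invoke finalize() by hand, delayupdate() now registers _finalize as a transaction finalizer under the same 'cl-<id>' category used for the pending callback, and tr is captured through weakref.proxy so the callback stored on the transaction does not hold a strong reference back to it. A minimal sketch of the callback pattern, using a hypothetical stub in place of Mercurial's transaction class:

    class transactionstub(object):
        def __init__(self):
            self._finalizecallbacks = {}
        def addfinalize(self, category, callback):
            # one callback per category; re-registering replaces it
            self._finalizecallbacks[category] = callback
        def close(self):
            # on commit, run every registered finalizer
            for category in sorted(self._finalizecallbacks):
                self._finalizecallbacks[category]()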
243 def finalize(self, tr):
246 def _finalize(self, tr):
244 "finalize index updates"
247 "finalize index updates"
245 self._delayed = False
248 self._delayed = False
246 self.opener = self._realopener
249 self.opener = self._realopener
247 # move redirected index data back into place
250 # move redirected index data back into place
248 if self._divert:
251 if self._divert:
249 assert not self._delaybuf
252 assert not self._delaybuf
250 tmpname = self.indexfile + ".a"
253 tmpname = self.indexfile + ".a"
251 nfile = self.opener.open(tmpname)
254 nfile = self.opener.open(tmpname)
252 nfile.close()
255 nfile.close()
253 self.opener.rename(tmpname, self.indexfile)
256 self.opener.rename(tmpname, self.indexfile)
254 elif self._delaybuf:
257 elif self._delaybuf:
255 fp = self.opener(self.indexfile, 'a')
258 fp = self.opener(self.indexfile, 'a')
256 fp.write("".join(self._delaybuf))
259 fp.write("".join(self._delaybuf))
257 fp.close()
260 fp.close()
258 self._delaybuf = None
261 self._delaybuf = None
259 self._divert = False
262 self._divert = False
260 # split when we're done
263 # split when we're done
261 self.checkinlinesize(tr)
264 self.checkinlinesize(tr)
262
265
263 def readpending(self, file):
266 def readpending(self, file):
264 r = revlog.revlog(self.opener, file)
267 r = revlog.revlog(self.opener, file)
265 self.index = r.index
268 self.index = r.index
266 self.nodemap = r.nodemap
269 self.nodemap = r.nodemap
267 self._nodecache = r._nodecache
270 self._nodecache = r._nodecache
268 self._chunkcache = r._chunkcache
271 self._chunkcache = r._chunkcache
269
272
270 def _writepending(self):
273 def _writepending(self):
271 "create a file containing the unfinalized state for pretxnchangegroup"
274 "create a file containing the unfinalized state for pretxnchangegroup"
272 if self._delaybuf:
275 if self._delaybuf:
273 # make a temporary copy of the index
276 # make a temporary copy of the index
274 fp1 = self._realopener(self.indexfile)
277 fp1 = self._realopener(self.indexfile)
275 fp2 = self._realopener(self.indexfile + ".a", "w")
278 fp2 = self._realopener(self.indexfile + ".a", "w")
276 fp2.write(fp1.read())
279 fp2.write(fp1.read())
277 # add pending data
280 # add pending data
278 fp2.write("".join(self._delaybuf))
281 fp2.write("".join(self._delaybuf))
279 fp2.close()
282 fp2.close()
280 # switch modes so finalize can simply rename
283 # switch modes so finalize can simply rename
281 self._delaybuf = None
284 self._delaybuf = None
282 self._divert = True
285 self._divert = True
283 self.opener = _divertopener(self._realopener, self.indexfile)
286 self.opener = _divertopener(self._realopener, self.indexfile)
284
287
285 if self._divert:
288 if self._divert:
286 return True
289 return True
287
290
288 return False
291 return False
289
292
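Putting delayupdate, _writepending and _finalize together, one transaction's worth of changelog writes flows roughly like this (a hedged walkthrough, not code from this change):

    cl.delayupdate(tr)     # index writes go to a buffer (or straight to
                           # '00changelog.i.a' if the repo started empty)
    cl.add(...)            # new entries accumulate, invisible to readers
    tr.writepending()      # runs cl._writepending(): index + buffer are
                           # copied to '00changelog.i.a' for hooks to read
    tr.close()             # runs cl._finalize(): the .a file is renamed
                           # (or the buffer appended) and the data goes live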
290 def checkinlinesize(self, tr, fp=None):
293 def checkinlinesize(self, tr, fp=None):
291 if not self._delayed:
294 if not self._delayed:
292 revlog.revlog.checkinlinesize(self, tr, fp)
295 revlog.revlog.checkinlinesize(self, tr, fp)
293
296
294 def read(self, node):
297 def read(self, node):
295 """
298 """
296 format used:
299 format used:
297 nodeid\n : manifest node in ascii
300 nodeid\n : manifest node in ascii
298 user\n : user, no \n or \r allowed
301 user\n : user, no \n or \r allowed
299 time tz extra\n : date (time is int or float, timezone is int)
302 time tz extra\n : date (time is int or float, timezone is int)
300 : extra is metadata, encoded and separated by '\0'
303 : extra is metadata, encoded and separated by '\0'
301 : older versions ignore it
304 : older versions ignore it
302 files\n\n : files modified by the cset, no \n or \r allowed
305 files\n\n : files modified by the cset, no \n or \r allowed
303 (.*) : comment (free text, ideally utf-8)
306 (.*) : comment (free text, ideally utf-8)
304
307
305 changelog v0 doesn't use extra
308 changelog v0 doesn't use extra
306 """
309 """
307 text = self.revision(node)
310 text = self.revision(node)
308 if not text:
311 if not text:
309 return (nullid, "", (0, 0), [], "", _defaultextra)
312 return (nullid, "", (0, 0), [], "", _defaultextra)
310 last = text.index("\n\n")
313 last = text.index("\n\n")
311 desc = encoding.tolocal(text[last + 2:])
314 desc = encoding.tolocal(text[last + 2:])
312 l = text[:last].split('\n')
315 l = text[:last].split('\n')
313 manifest = bin(l[0])
316 manifest = bin(l[0])
314 user = encoding.tolocal(l[1])
317 user = encoding.tolocal(l[1])
315
318
316 tdata = l[2].split(' ', 2)
319 tdata = l[2].split(' ', 2)
317 if len(tdata) != 3:
320 if len(tdata) != 3:
318 time = float(tdata[0])
321 time = float(tdata[0])
319 try:
322 try:
320 # various tools did silly things with the time zone field.
323 # various tools did silly things with the time zone field.
321 timezone = int(tdata[1])
324 timezone = int(tdata[1])
322 except ValueError:
325 except ValueError:
323 timezone = 0
326 timezone = 0
324 extra = _defaultextra
327 extra = _defaultextra
325 else:
328 else:
326 time, timezone = float(tdata[0]), int(tdata[1])
329 time, timezone = float(tdata[0]), int(tdata[1])
327 extra = decodeextra(tdata[2])
330 extra = decodeextra(tdata[2])
328
331
329 files = l[3:]
332 files = l[3:]
330 return (manifest, user, (time, timezone), files, desc, extra)
333 return (manifest, user, (time, timezone), files, desc, extra)
331
334
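To make the format in read()'s docstring concrete, here is a hand-assembled revision text (all values hypothetical); parsing it as read() does yields the tuple shown below:

    text = ('0123456789abcdef0123456789abcdef01234567\n'  # manifest node
            'Alice <alice@example.com>\n'                 # user
            '1414000000 0 branch:stable\n'                # time tz extra
            'src/module.py\n'                             # files...
            'tests/test_module.py\n'
            '\n'
            'fix a corner case in module')                # description
    # -> (manifest, 'Alice <alice@example.com>', (1414000000.0, 0),
    #     ['src/module.py', 'tests/test_module.py'],
    #     'fix a corner case in module', {'branch': 'stable'})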
332 def add(self, manifest, files, desc, transaction, p1, p2,
335 def add(self, manifest, files, desc, transaction, p1, p2,
333 user, date=None, extra=None):
336 user, date=None, extra=None):
334 # Convert to UTF-8 encoded bytestrings as the very first
337 # Convert to UTF-8 encoded bytestrings as the very first
335 # thing: calling any method on a localstr object will turn it
338 # thing: calling any method on a localstr object will turn it
336 # into a str object and the cached UTF-8 string is thus lost.
339 # into a str object and the cached UTF-8 string is thus lost.
337 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
340 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
338
341
339 user = user.strip()
342 user = user.strip()
340 # An empty username or a username with a "\n" will make the
343 # An empty username or a username with a "\n" will make the
341 # revision text contain two "\n\n" sequences -> corrupt
344 # revision text contain two "\n\n" sequences -> corrupt
342 # repository since read cannot unpack the revision.
345 # repository since read cannot unpack the revision.
343 if not user:
346 if not user:
344 raise error.RevlogError(_("empty username"))
347 raise error.RevlogError(_("empty username"))
345 if "\n" in user:
348 if "\n" in user:
346 raise error.RevlogError(_("username %s contains a newline")
349 raise error.RevlogError(_("username %s contains a newline")
347 % repr(user))
350 % repr(user))
348
351
349 desc = stripdesc(desc)
352 desc = stripdesc(desc)
350
353
351 if date:
354 if date:
352 parseddate = "%d %d" % util.parsedate(date)
355 parseddate = "%d %d" % util.parsedate(date)
353 else:
356 else:
354 parseddate = "%d %d" % util.makedate()
357 parseddate = "%d %d" % util.makedate()
355 if extra:
358 if extra:
356 branch = extra.get("branch")
359 branch = extra.get("branch")
357 if branch in ("default", ""):
360 if branch in ("default", ""):
358 del extra["branch"]
361 del extra["branch"]
359 elif branch in (".", "null", "tip"):
362 elif branch in (".", "null", "tip"):
360 raise error.RevlogError(_('the name \'%s\' is reserved')
363 raise error.RevlogError(_('the name \'%s\' is reserved')
361 % branch)
364 % branch)
362 if extra:
365 if extra:
363 extra = encodeextra(extra)
366 extra = encodeextra(extra)
364 parseddate = "%s %s" % (parseddate, extra)
367 parseddate = "%s %s" % (parseddate, extra)
365 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
368 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
366 text = "\n".join(l)
369 text = "\n".join(l)
367 return self.addrevision(text, transaction, len(self), p1, p2)
370 return self.addrevision(text, transaction, len(self), p1, p2)
368
371
369 def branchinfo(self, rev):
372 def branchinfo(self, rev):
370 """return the branch name and open/close state of a revision
373 """return the branch name and open/close state of a revision
371
374
372 This function exists because creating a changectx object
375 This function exists because creating a changectx object
373 just to access this is costly."""
376 just to access this is costly."""
374 extra = self.read(rev)[5]
377 extra = self.read(rev)[5]
375 return encoding.tolocal(extra.get("branch")), 'close' in extra
378 return encoding.tolocal(extra.get("branch")), 'close' in extra
@@ -1,1804 +1,1803 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 propertycache = util.propertycache
21 propertycache = util.propertycache
22 filecache = scmutil.filecache
22 filecache = scmutil.filecache
23
23
24 class repofilecache(filecache):
24 class repofilecache(filecache):
25 """All filecache usage on repo are done for logic that should be unfiltered
25 """All filecache usage on repo are done for logic that should be unfiltered
26 """
26 """
27
27
28 def __get__(self, repo, type=None):
28 def __get__(self, repo, type=None):
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 def __set__(self, repo, value):
30 def __set__(self, repo, value):
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 def __delete__(self, repo):
32 def __delete__(self, repo):
33 return super(repofilecache, self).__delete__(repo.unfiltered())
33 return super(repofilecache, self).__delete__(repo.unfiltered())
34
34
35 class storecache(repofilecache):
35 class storecache(repofilecache):
36 """filecache for files in the store"""
36 """filecache for files in the store"""
37 def join(self, obj, fname):
37 def join(self, obj, fname):
38 return obj.sjoin(fname)
38 return obj.sjoin(fname)
39
39
40 class unfilteredpropertycache(propertycache):
40 class unfilteredpropertycache(propertycache):
41 """propertycache that apply to unfiltered repo only"""
41 """propertycache that apply to unfiltered repo only"""
42
42
43 def __get__(self, repo, type=None):
43 def __get__(self, repo, type=None):
44 unfi = repo.unfiltered()
44 unfi = repo.unfiltered()
45 if unfi is repo:
45 if unfi is repo:
46 return super(unfilteredpropertycache, self).__get__(unfi)
46 return super(unfilteredpropertycache, self).__get__(unfi)
47 return getattr(unfi, self.name)
47 return getattr(unfi, self.name)
48
48
49 class filteredpropertycache(propertycache):
49 class filteredpropertycache(propertycache):
50 """propertycache that must take filtering in account"""
50 """propertycache that must take filtering in account"""
51
51
52 def cachevalue(self, obj, value):
52 def cachevalue(self, obj, value):
53 object.__setattr__(obj, self.name, value)
53 object.__setattr__(obj, self.name, value)
54
54
55
55
56 def hasunfilteredcache(repo, name):
56 def hasunfilteredcache(repo, name):
57 """check if a repo has an unfilteredpropertycache value for <name>"""
57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 return name in vars(repo.unfiltered())
58 return name in vars(repo.unfiltered())
59
59
60 def unfilteredmethod(orig):
60 def unfilteredmethod(orig):
61 """decorate method that always need to be run on unfiltered version"""
61 """decorate method that always need to be run on unfiltered version"""
62 def wrapper(repo, *args, **kwargs):
62 def wrapper(repo, *args, **kwargs):
63 return orig(repo.unfiltered(), *args, **kwargs)
63 return orig(repo.unfiltered(), *args, **kwargs)
64 return wrapper
64 return wrapper
65
65
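A short usage sketch for these helpers (the decorated method is hypothetical): wrapping a method with unfilteredmethod guarantees its body runs against repo.unfiltered(), even when invoked on a filtered view:

    @unfilteredmethod
    def _rebuildcaches(self):            # hypothetical repo method
        # 'self' is always repo.unfiltered() in the body, so revision
        # numbers are stable regardless of the caller's filter level
        ...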
66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 'unbundle'))
67 'unbundle'))
68 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69
69
70 class localpeer(peer.peerrepository):
70 class localpeer(peer.peerrepository):
71 '''peer for a local repo; reflects only the most recent API'''
71 '''peer for a local repo; reflects only the most recent API'''
72
72
73 def __init__(self, repo, caps=moderncaps):
73 def __init__(self, repo, caps=moderncaps):
74 peer.peerrepository.__init__(self)
74 peer.peerrepository.__init__(self)
75 self._repo = repo.filtered('served')
75 self._repo = repo.filtered('served')
76 self.ui = repo.ui
76 self.ui = repo.ui
77 self._caps = repo._restrictcapabilities(caps)
77 self._caps = repo._restrictcapabilities(caps)
78 self.requirements = repo.requirements
78 self.requirements = repo.requirements
79 self.supportedformats = repo.supportedformats
79 self.supportedformats = repo.supportedformats
80
80
81 def close(self):
81 def close(self):
82 self._repo.close()
82 self._repo.close()
83
83
84 def _capabilities(self):
84 def _capabilities(self):
85 return self._caps
85 return self._caps
86
86
87 def local(self):
87 def local(self):
88 return self._repo
88 return self._repo
89
89
90 def canpush(self):
90 def canpush(self):
91 return True
91 return True
92
92
93 def url(self):
93 def url(self):
94 return self._repo.url()
94 return self._repo.url()
95
95
96 def lookup(self, key):
96 def lookup(self, key):
97 return self._repo.lookup(key)
97 return self._repo.lookup(key)
98
98
99 def branchmap(self):
99 def branchmap(self):
100 return self._repo.branchmap()
100 return self._repo.branchmap()
101
101
102 def heads(self):
102 def heads(self):
103 return self._repo.heads()
103 return self._repo.heads()
104
104
105 def known(self, nodes):
105 def known(self, nodes):
106 return self._repo.known(nodes)
106 return self._repo.known(nodes)
107
107
108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 format='HG10', **kwargs):
109 format='HG10', **kwargs):
110 cg = exchange.getbundle(self._repo, source, heads=heads,
110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 common=common, bundlecaps=bundlecaps, **kwargs)
111 common=common, bundlecaps=bundlecaps, **kwargs)
112 if bundlecaps is not None and 'HG2Y' in bundlecaps:
112 if bundlecaps is not None and 'HG2Y' in bundlecaps:
113 # When requesting a bundle2, getbundle returns a stream to make the
113 # When requesting a bundle2, getbundle returns a stream to make the
114 # wire level function happier. We need to build a proper object
114 # wire level function happier. We need to build a proper object
115 # from it in local peer.
115 # from it in local peer.
116 cg = bundle2.unbundle20(self.ui, cg)
116 cg = bundle2.unbundle20(self.ui, cg)
117 return cg
117 return cg
118
118
119 # TODO We might want to move the next two calls into legacypeer and add
119 # TODO We might want to move the next two calls into legacypeer and add
120 # unbundle instead.
120 # unbundle instead.
121
121
122 def unbundle(self, cg, heads, url):
122 def unbundle(self, cg, heads, url):
123 """apply a bundle on a repo
123 """apply a bundle on a repo
124
124
125 This function handles the repo locking itself."""
125 This function handles the repo locking itself."""
126 try:
126 try:
127 cg = exchange.readbundle(self.ui, cg, None)
127 cg = exchange.readbundle(self.ui, cg, None)
128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 if util.safehasattr(ret, 'getchunks'):
129 if util.safehasattr(ret, 'getchunks'):
130 # This is a bundle20 object, turn it into an unbundler.
130 # This is a bundle20 object, turn it into an unbundler.
131 # This little dance should be dropped eventually when the API
131 # This little dance should be dropped eventually when the API
132 # is finally improved.
132 # is finally improved.
133 stream = util.chunkbuffer(ret.getchunks())
133 stream = util.chunkbuffer(ret.getchunks())
134 ret = bundle2.unbundle20(self.ui, stream)
134 ret = bundle2.unbundle20(self.ui, stream)
135 return ret
135 return ret
136 except error.PushRaced, exc:
136 except error.PushRaced, exc:
137 raise error.ResponseError(_('push failed:'), str(exc))
137 raise error.ResponseError(_('push failed:'), str(exc))
138
138
139 def lock(self):
139 def lock(self):
140 return self._repo.lock()
140 return self._repo.lock()
141
141
142 def addchangegroup(self, cg, source, url):
142 def addchangegroup(self, cg, source, url):
143 return changegroup.addchangegroup(self._repo, cg, source, url)
143 return changegroup.addchangegroup(self._repo, cg, source, url)
144
144
145 def pushkey(self, namespace, key, old, new):
145 def pushkey(self, namespace, key, old, new):
146 return self._repo.pushkey(namespace, key, old, new)
146 return self._repo.pushkey(namespace, key, old, new)
147
147
148 def listkeys(self, namespace):
148 def listkeys(self, namespace):
149 return self._repo.listkeys(namespace)
149 return self._repo.listkeys(namespace)
150
150
151 def debugwireargs(self, one, two, three=None, four=None, five=None):
151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 '''used to test argument passing over the wire'''
152 '''used to test argument passing over the wire'''
153 return "%s %s %s %s %s" % (one, two, three, four, five)
153 return "%s %s %s %s %s" % (one, two, three, four, five)
154
154
155 class locallegacypeer(localpeer):
155 class locallegacypeer(localpeer):
156 '''peer extension which implements legacy methods too; used for tests with
156 '''peer extension which implements legacy methods too; used for tests with
157 restricted capabilities'''
157 restricted capabilities'''
158
158
159 def __init__(self, repo):
159 def __init__(self, repo):
160 localpeer.__init__(self, repo, caps=legacycaps)
160 localpeer.__init__(self, repo, caps=legacycaps)
161
161
162 def branches(self, nodes):
162 def branches(self, nodes):
163 return self._repo.branches(nodes)
163 return self._repo.branches(nodes)
164
164
165 def between(self, pairs):
165 def between(self, pairs):
166 return self._repo.between(pairs)
166 return self._repo.between(pairs)
167
167
168 def changegroup(self, basenodes, source):
168 def changegroup(self, basenodes, source):
169 return changegroup.changegroup(self._repo, basenodes, source)
169 return changegroup.changegroup(self._repo, basenodes, source)
170
170
171 def changegroupsubset(self, bases, heads, source):
171 def changegroupsubset(self, bases, heads, source):
172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173
173
174 class localrepository(object):
174 class localrepository(object):
175
175
176 supportedformats = set(('revlogv1', 'generaldelta'))
176 supportedformats = set(('revlogv1', 'generaldelta'))
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 'dotencode'))
178 'dotencode'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
180 requirements = ['revlogv1']
180 requirements = ['revlogv1']
181 filtername = None
181 filtername = None
182
182
183 # a list of (ui, featureset) functions.
183 # a list of (ui, featureset) functions.
184 # only functions defined in module of enabled extensions are invoked
184 # only functions defined in module of enabled extensions are invoked
185 featuresetupfuncs = set()
185 featuresetupfuncs = set()
186
186
187 def _baserequirements(self, create):
187 def _baserequirements(self, create):
188 return self.requirements[:]
188 return self.requirements[:]
189
189
190 def __init__(self, baseui, path=None, create=False):
190 def __init__(self, baseui, path=None, create=False):
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wopener = self.wvfs
192 self.wopener = self.wvfs
193 self.root = self.wvfs.base
193 self.root = self.wvfs.base
194 self.path = self.wvfs.join(".hg")
194 self.path = self.wvfs.join(".hg")
195 self.origroot = path
195 self.origroot = path
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.vfs = scmutil.vfs(self.path)
197 self.vfs = scmutil.vfs(self.path)
198 self.opener = self.vfs
198 self.opener = self.vfs
199 self.baseui = baseui
199 self.baseui = baseui
200 self.ui = baseui.copy()
200 self.ui = baseui.copy()
201 self.ui.copy = baseui.copy # prevent copying repo configuration
201 self.ui.copy = baseui.copy # prevent copying repo configuration
202 # A list of callbacks to shape the phase if no data were found.
202 # A list of callbacks to shape the phase if no data were found.
203 # Callbacks are in the form: func(repo, roots) --> processed roots.
203 # Callbacks are in the form: func(repo, roots) --> processed roots.
204 # This list is to be filled by extensions during repo setup.
204 # This list is to be filled by extensions during repo setup.
205 self._phasedefaults = []
205 self._phasedefaults = []
206 try:
206 try:
207 self.ui.readconfig(self.join("hgrc"), self.root)
207 self.ui.readconfig(self.join("hgrc"), self.root)
208 extensions.loadall(self.ui)
208 extensions.loadall(self.ui)
209 except IOError:
209 except IOError:
210 pass
210 pass
211
211
212 if self.featuresetupfuncs:
212 if self.featuresetupfuncs:
213 self.supported = set(self._basesupported) # use private copy
213 self.supported = set(self._basesupported) # use private copy
214 extmods = set(m.__name__ for n, m
214 extmods = set(m.__name__ for n, m
215 in extensions.extensions(self.ui))
215 in extensions.extensions(self.ui))
216 for setupfunc in self.featuresetupfuncs:
216 for setupfunc in self.featuresetupfuncs:
217 if setupfunc.__module__ in extmods:
217 if setupfunc.__module__ in extmods:
218 setupfunc(self.ui, self.supported)
218 setupfunc(self.ui, self.supported)
219 else:
219 else:
220 self.supported = self._basesupported
220 self.supported = self._basesupported
221
221
222 if not self.vfs.isdir():
222 if not self.vfs.isdir():
223 if create:
223 if create:
224 if not self.wvfs.exists():
224 if not self.wvfs.exists():
225 self.wvfs.makedirs()
225 self.wvfs.makedirs()
226 self.vfs.makedir(notindexed=True)
226 self.vfs.makedir(notindexed=True)
227 requirements = self._baserequirements(create)
227 requirements = self._baserequirements(create)
228 if self.ui.configbool('format', 'usestore', True):
228 if self.ui.configbool('format', 'usestore', True):
229 self.vfs.mkdir("store")
229 self.vfs.mkdir("store")
230 requirements.append("store")
230 requirements.append("store")
231 if self.ui.configbool('format', 'usefncache', True):
231 if self.ui.configbool('format', 'usefncache', True):
232 requirements.append("fncache")
232 requirements.append("fncache")
233 if self.ui.configbool('format', 'dotencode', True):
233 if self.ui.configbool('format', 'dotencode', True):
234 requirements.append('dotencode')
234 requirements.append('dotencode')
235 # create an invalid changelog
235 # create an invalid changelog
236 self.vfs.append(
236 self.vfs.append(
237 "00changelog.i",
237 "00changelog.i",
238 '\0\0\0\2' # represents revlogv2
238 '\0\0\0\2' # represents revlogv2
239 ' dummy changelog to prevent using the old repo layout'
239 ' dummy changelog to prevent using the old repo layout'
240 )
240 )
241 if self.ui.configbool('format', 'generaldelta', False):
241 if self.ui.configbool('format', 'generaldelta', False):
242 requirements.append("generaldelta")
242 requirements.append("generaldelta")
243 requirements = set(requirements)
243 requirements = set(requirements)
244 else:
244 else:
245 raise error.RepoError(_("repository %s not found") % path)
245 raise error.RepoError(_("repository %s not found") % path)
246 elif create:
246 elif create:
247 raise error.RepoError(_("repository %s already exists") % path)
247 raise error.RepoError(_("repository %s already exists") % path)
248 else:
248 else:
249 try:
249 try:
250 requirements = scmutil.readrequires(self.vfs, self.supported)
250 requirements = scmutil.readrequires(self.vfs, self.supported)
251 except IOError, inst:
251 except IOError, inst:
252 if inst.errno != errno.ENOENT:
252 if inst.errno != errno.ENOENT:
253 raise
253 raise
254 requirements = set()
254 requirements = set()
255
255
256 self.sharedpath = self.path
256 self.sharedpath = self.path
257 try:
257 try:
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 realpath=True)
259 realpath=True)
260 s = vfs.base
260 s = vfs.base
261 if not vfs.exists():
261 if not vfs.exists():
262 raise error.RepoError(
262 raise error.RepoError(
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 self.sharedpath = s
264 self.sharedpath = s
265 except IOError, inst:
265 except IOError, inst:
266 if inst.errno != errno.ENOENT:
266 if inst.errno != errno.ENOENT:
267 raise
267 raise
268
268
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 self.spath = self.store.path
270 self.spath = self.store.path
271 self.svfs = self.store.vfs
271 self.svfs = self.store.vfs
272 self.sopener = self.svfs
272 self.sopener = self.svfs
273 self.sjoin = self.store.join
273 self.sjoin = self.store.join
274 self.vfs.createmode = self.store.createmode
274 self.vfs.createmode = self.store.createmode
275 self._applyrequirements(requirements)
275 self._applyrequirements(requirements)
276 if create:
276 if create:
277 self._writerequirements()
277 self._writerequirements()
278
278
279
279
280 self._branchcaches = {}
280 self._branchcaches = {}
281 self.filterpats = {}
281 self.filterpats = {}
282 self._datafilters = {}
282 self._datafilters = {}
283 self._transref = self._lockref = self._wlockref = None
283 self._transref = self._lockref = self._wlockref = None
284
284
285 # A cache for various files under .hg/ that tracks file changes,
285 # A cache for various files under .hg/ that tracks file changes,
286 # (used by the filecache decorator)
286 # (used by the filecache decorator)
287 #
287 #
288 # Maps a property name to its util.filecacheentry
288 # Maps a property name to its util.filecacheentry
289 self._filecache = {}
289 self._filecache = {}
290
290
291 # hold sets of revisions to be filtered
291 # hold sets of revisions to be filtered
292 # should be cleared when something might have changed the filter value:
292 # should be cleared when something might have changed the filter value:
293 # - new changesets,
293 # - new changesets,
294 # - phase change,
294 # - phase change,
295 # - new obsolescence marker,
295 # - new obsolescence marker,
296 # - working directory parent change,
296 # - working directory parent change,
297 # - bookmark changes
297 # - bookmark changes
298 self.filteredrevcache = {}
298 self.filteredrevcache = {}
299
299
300 def close(self):
300 def close(self):
301 pass
301 pass
302
302
303 def _restrictcapabilities(self, caps):
303 def _restrictcapabilities(self, caps):
304 # bundle2 is not ready for prime time, drop it unless explicitly
304 # bundle2 is not ready for prime time, drop it unless explicitly
305 # required by the tests (or some brave tester)
305 # required by the tests (or some brave tester)
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 caps = set(caps)
307 caps = set(caps)
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 return caps
310 return caps
311
311
312 def _applyrequirements(self, requirements):
312 def _applyrequirements(self, requirements):
313 self.requirements = requirements
313 self.requirements = requirements
314 self.sopener.options = dict((r, 1) for r in requirements
314 self.sopener.options = dict((r, 1) for r in requirements
315 if r in self.openerreqs)
315 if r in self.openerreqs)
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 if chunkcachesize is not None:
317 if chunkcachesize is not None:
318 self.sopener.options['chunkcachesize'] = chunkcachesize
318 self.sopener.options['chunkcachesize'] = chunkcachesize
319
319
320 def _writerequirements(self):
320 def _writerequirements(self):
321 reqfile = self.opener("requires", "w")
321 reqfile = self.opener("requires", "w")
322 for r in sorted(self.requirements):
322 for r in sorted(self.requirements):
323 reqfile.write("%s\n" % r)
323 reqfile.write("%s\n" % r)
324 reqfile.close()
324 reqfile.close()
325
325
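The resulting .hg/requires file is nothing more than the sorted requirement names, one per line; with the defaults used in __init__ above, a fresh repository would contain something like this (generaldelta appearing only when format.generaldelta is enabled):

    dotencode
    fncache
    revlogv1
    store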
326 def _checknested(self, path):
326 def _checknested(self, path):
327 """Determine if path is a legal nested repository."""
327 """Determine if path is a legal nested repository."""
328 if not path.startswith(self.root):
328 if not path.startswith(self.root):
329 return False
329 return False
330 subpath = path[len(self.root) + 1:]
330 subpath = path[len(self.root) + 1:]
331 normsubpath = util.pconvert(subpath)
331 normsubpath = util.pconvert(subpath)
332
332
333 # XXX: Checking against the current working copy is wrong in
333 # XXX: Checking against the current working copy is wrong in
334 # the sense that it can reject things like
334 # the sense that it can reject things like
335 #
335 #
336 # $ hg cat -r 10 sub/x.txt
336 # $ hg cat -r 10 sub/x.txt
337 #
337 #
338 # if sub/ is no longer a subrepository in the working copy
338 # if sub/ is no longer a subrepository in the working copy
339 # parent revision.
339 # parent revision.
340 #
340 #
341 # However, it can of course also allow things that would have
341 # However, it can of course also allow things that would have
342 # been rejected before, such as the above cat command if sub/
342 # been rejected before, such as the above cat command if sub/
343 # is a subrepository now, but was a normal directory before.
343 # is a subrepository now, but was a normal directory before.
344 # The old path auditor would have rejected by mistake since it
344 # The old path auditor would have rejected by mistake since it
345 # panics when it sees sub/.hg/.
345 # panics when it sees sub/.hg/.
346 #
346 #
347 # All in all, checking against the working copy seems sensible
347 # All in all, checking against the working copy seems sensible
348 # since we want to prevent access to nested repositories on
348 # since we want to prevent access to nested repositories on
349 # the filesystem *now*.
349 # the filesystem *now*.
350 ctx = self[None]
350 ctx = self[None]
351 parts = util.splitpath(subpath)
351 parts = util.splitpath(subpath)
352 while parts:
352 while parts:
353 prefix = '/'.join(parts)
353 prefix = '/'.join(parts)
354 if prefix in ctx.substate:
354 if prefix in ctx.substate:
355 if prefix == normsubpath:
355 if prefix == normsubpath:
356 return True
356 return True
357 else:
357 else:
358 sub = ctx.sub(prefix)
358 sub = ctx.sub(prefix)
359 return sub.checknested(subpath[len(prefix) + 1:])
359 return sub.checknested(subpath[len(prefix) + 1:])
360 else:
360 else:
361 parts.pop()
361 parts.pop()
362 return False
362 return False
363
363
364 def peer(self):
364 def peer(self):
365 return localpeer(self) # not cached to avoid reference cycle
365 return localpeer(self) # not cached to avoid reference cycle
366
366
367 def unfiltered(self):
367 def unfiltered(self):
368 """Return unfiltered version of the repository
368 """Return unfiltered version of the repository
369
369
370 Intended to be overridden by filtered repos."""
370 Intended to be overridden by filtered repos."""
371 return self
371 return self
372
372
373 def filtered(self, name):
373 def filtered(self, name):
374 """Return a filtered version of a repository"""
374 """Return a filtered version of a repository"""
375 # build a new class with the mixin and the current class
375 # build a new class with the mixin and the current class
376 # (possibly subclass of the repo)
376 # (possibly subclass of the repo)
377 class proxycls(repoview.repoview, self.unfiltered().__class__):
377 class proxycls(repoview.repoview, self.unfiltered().__class__):
378 pass
378 pass
379 return proxycls(self, name)
379 return proxycls(self, name)
380
380
381 @repofilecache('bookmarks')
381 @repofilecache('bookmarks')
382 def _bookmarks(self):
382 def _bookmarks(self):
383 return bookmarks.bmstore(self)
383 return bookmarks.bmstore(self)
384
384
385 @repofilecache('bookmarks.current')
385 @repofilecache('bookmarks.current')
386 def _bookmarkcurrent(self):
386 def _bookmarkcurrent(self):
387 return bookmarks.readcurrent(self)
387 return bookmarks.readcurrent(self)
388
388
389 def bookmarkheads(self, bookmark):
389 def bookmarkheads(self, bookmark):
390 name = bookmark.split('@', 1)[0]
390 name = bookmark.split('@', 1)[0]
391 heads = []
391 heads = []
392 for mark, n in self._bookmarks.iteritems():
392 for mark, n in self._bookmarks.iteritems():
393 if mark.split('@', 1)[0] == name:
393 if mark.split('@', 1)[0] == name:
394 heads.append(n)
394 heads.append(n)
395 return heads
395 return heads
396
396
397 @storecache('phaseroots')
397 @storecache('phaseroots')
398 def _phasecache(self):
398 def _phasecache(self):
399 return phases.phasecache(self, self._phasedefaults)
399 return phases.phasecache(self, self._phasedefaults)
400
400
401 @storecache('obsstore')
401 @storecache('obsstore')
402 def obsstore(self):
402 def obsstore(self):
403 # read default format for new obsstore.
403 # read default format for new obsstore.
404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
405 # rely on obsstore class default when possible.
405 # rely on obsstore class default when possible.
406 kwargs = {}
406 kwargs = {}
407 if defaultformat is not None:
407 if defaultformat is not None:
408 kwargs['defaultformat'] = defaultformat
408 kwargs['defaultformat'] = defaultformat
409 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
409 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
410 store = obsolete.obsstore(self.sopener, readonly=readonly,
410 store = obsolete.obsstore(self.sopener, readonly=readonly,
411 **kwargs)
411 **kwargs)
412 if store and readonly:
412 if store and readonly:
413 # message is rare enough to not be translated
413 # message is rare enough to not be translated
414 msg = 'obsolete feature not enabled but %i markers found!\n'
414 msg = 'obsolete feature not enabled but %i markers found!\n'
415 self.ui.warn(msg % len(list(store)))
415 self.ui.warn(msg % len(list(store)))
416 return store
416 return store
417
417
418 @storecache('00changelog.i')
418 @storecache('00changelog.i')
419 def changelog(self):
419 def changelog(self):
420 c = changelog.changelog(self.sopener)
420 c = changelog.changelog(self.sopener)
421 if 'HG_PENDING' in os.environ:
421 if 'HG_PENDING' in os.environ:
422 p = os.environ['HG_PENDING']
422 p = os.environ['HG_PENDING']
423 if p.startswith(self.root):
423 if p.startswith(self.root):
424 c.readpending('00changelog.i.a')
424 c.readpending('00changelog.i.a')
425 return c
425 return c
426
426
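The HG_PENDING branch here is the consumer side of changelog._writepending() above: while a transaction is open, Mercurial exports HG_PENDING=<repo root> to hooks, so a hook that re-opens the repository transparently reads the pending '00changelog.i.a' instead of the finalized index. A hedged sketch of a hook relying on this (hook and module names are made up):

    # hgrc:
    #   [hooks]
    #   pretxnchangegroup.audit = python:audithooks.check

    # audithooks.py:
    def check(ui, repo, node, **kwargs):
        # incoming changesets are visible because the changelog was
        # loaded from '00changelog.i.a' thanks to HG_PENDING
        for rev in repo.changelog.revs(start=repo[node].rev()):
            ui.status('incoming: %s\n' % repo[rev])
        # returning a falsy value lets the transaction proceed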
427 @storecache('00manifest.i')
427 @storecache('00manifest.i')
428 def manifest(self):
428 def manifest(self):
429 return manifest.manifest(self.sopener)
429 return manifest.manifest(self.sopener)
430
430
431 @repofilecache('dirstate')
431 @repofilecache('dirstate')
432 def dirstate(self):
432 def dirstate(self):
433 warned = [0]
433 warned = [0]
434 def validate(node):
434 def validate(node):
435 try:
435 try:
436 self.changelog.rev(node)
436 self.changelog.rev(node)
437 return node
437 return node
438 except error.LookupError:
438 except error.LookupError:
439 if not warned[0]:
439 if not warned[0]:
440 warned[0] = True
440 warned[0] = True
441 self.ui.warn(_("warning: ignoring unknown"
441 self.ui.warn(_("warning: ignoring unknown"
442 " working parent %s!\n") % short(node))
442 " working parent %s!\n") % short(node))
443 return nullid
443 return nullid
444
444
445 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
445 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
446
446
447 def __getitem__(self, changeid):
447 def __getitem__(self, changeid):
448 if changeid is None:
448 if changeid is None:
449 return context.workingctx(self)
449 return context.workingctx(self)
450 return context.changectx(self, changeid)
450 return context.changectx(self, changeid)
451
451
452 def __contains__(self, changeid):
452 def __contains__(self, changeid):
453 try:
453 try:
454 return bool(self.lookup(changeid))
454 return bool(self.lookup(changeid))
455 except error.RepoLookupError:
455 except error.RepoLookupError:
456 return False
456 return False
457
457
458 def __nonzero__(self):
458 def __nonzero__(self):
459 return True
459 return True
460
460
461 def __len__(self):
461 def __len__(self):
462 return len(self.changelog)
462 return len(self.changelog)
463
463
464 def __iter__(self):
464 def __iter__(self):
465 return iter(self.changelog)
465 return iter(self.changelog)
466
466
467 def revs(self, expr, *args):
467 def revs(self, expr, *args):
468 '''Return a list of revisions matching the given revset'''
468 '''Return a list of revisions matching the given revset'''
469 expr = revset.formatspec(expr, *args)
469 expr = revset.formatspec(expr, *args)
470 m = revset.match(None, expr)
470 m = revset.match(None, expr)
471 return m(self, revset.spanset(self))
471 return m(self, revset.spanset(self))
472
472
473 def set(self, expr, *args):
473 def set(self, expr, *args):
474 '''
474 '''
475 Yield a context for each matching revision, after doing arg
475 Yield a context for each matching revision, after doing arg
476 replacement via revset.formatspec
476 replacement via revset.formatspec
477 '''
477 '''
478 for r in self.revs(expr, *args):
478 for r in self.revs(expr, *args):
479 yield self[r]
479 yield self[r]
480
480
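A usage sketch for revs()/set(): revset.formatspec handles quoting, so values can be spliced into a revset expression safely (the query itself is made up):

    for ctx in repo.set('branch(%s) and head() and not closed()', 'default'):
        repo.ui.status('open head: %s\n' % ctx)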
481 def url(self):
481 def url(self):
482 return 'file:' + self.root
482 return 'file:' + self.root
483
483
484 def hook(self, name, throw=False, **args):
484 def hook(self, name, throw=False, **args):
485 """Call a hook, passing this repo instance.
485 """Call a hook, passing this repo instance.
486
486
487 This is a convenience method to aid invoking hooks. Extensions likely
487 This is a convenience method to aid invoking hooks. Extensions likely
488 won't call this unless they have registered a custom hook or are
488 won't call this unless they have registered a custom hook or are
489 replacing code that is expected to call a hook.
489 replacing code that is expected to call a hook.
490 """
490 """
491 return hook.hook(self.ui, self, name, throw, **args)
491 return hook.hook(self.ui, self, name, throw, **args)
492
492
493 @unfilteredmethod
493 @unfilteredmethod
494 def _tag(self, names, node, message, local, user, date, extra={},
494 def _tag(self, names, node, message, local, user, date, extra={},
495 editor=False):
495 editor=False):
496 if isinstance(names, str):
496 if isinstance(names, str):
497 names = (names,)
497 names = (names,)
498
498
499 branches = self.branchmap()
499 branches = self.branchmap()
500 for name in names:
500 for name in names:
501 self.hook('pretag', throw=True, node=hex(node), tag=name,
501 self.hook('pretag', throw=True, node=hex(node), tag=name,
502 local=local)
502 local=local)
503 if name in branches:
503 if name in branches:
504 self.ui.warn(_("warning: tag %s conflicts with existing"
504 self.ui.warn(_("warning: tag %s conflicts with existing"
505 " branch name\n") % name)
505 " branch name\n") % name)
506
506
507 def writetags(fp, names, munge, prevtags):
507 def writetags(fp, names, munge, prevtags):
508 fp.seek(0, 2)
508 fp.seek(0, 2)
509 if prevtags and prevtags[-1] != '\n':
509 if prevtags and prevtags[-1] != '\n':
510 fp.write('\n')
510 fp.write('\n')
511 for name in names:
511 for name in names:
512 m = munge and munge(name) or name
512 m = munge and munge(name) or name
513 if (self._tagscache.tagtypes and
513 if (self._tagscache.tagtypes and
514 name in self._tagscache.tagtypes):
514 name in self._tagscache.tagtypes):
515 old = self.tags().get(name, nullid)
515 old = self.tags().get(name, nullid)
516 fp.write('%s %s\n' % (hex(old), m))
516 fp.write('%s %s\n' % (hex(old), m))
517 fp.write('%s %s\n' % (hex(node), m))
517 fp.write('%s %s\n' % (hex(node), m))
518 fp.close()
518 fp.close()
519
519
520 prevtags = ''
520 prevtags = ''
521 if local:
521 if local:
522 try:
522 try:
523 fp = self.opener('localtags', 'r+')
523 fp = self.opener('localtags', 'r+')
524 except IOError:
524 except IOError:
525 fp = self.opener('localtags', 'a')
525 fp = self.opener('localtags', 'a')
526 else:
526 else:
527 prevtags = fp.read()
527 prevtags = fp.read()
528
528
529 # local tags are stored in the current charset
529 # local tags are stored in the current charset
530 writetags(fp, names, None, prevtags)
530 writetags(fp, names, None, prevtags)
531 for name in names:
531 for name in names:
532 self.hook('tag', node=hex(node), tag=name, local=local)
532 self.hook('tag', node=hex(node), tag=name, local=local)
533 return
533 return
534
534
535 try:
535 try:
536 fp = self.wfile('.hgtags', 'rb+')
536 fp = self.wfile('.hgtags', 'rb+')
537 except IOError, e:
537 except IOError, e:
538 if e.errno != errno.ENOENT:
538 if e.errno != errno.ENOENT:
539 raise
539 raise
540 fp = self.wfile('.hgtags', 'ab')
540 fp = self.wfile('.hgtags', 'ab')
541 else:
541 else:
542 prevtags = fp.read()
542 prevtags = fp.read()
543
543
544 # committed tags are stored in UTF-8
544 # committed tags are stored in UTF-8
545 writetags(fp, names, encoding.fromlocal, prevtags)
545 writetags(fp, names, encoding.fromlocal, prevtags)
546
546
547 fp.close()
547 fp.close()
548
548
549 self.invalidatecaches()
549 self.invalidatecaches()
550
550
551 if '.hgtags' not in self.dirstate:
551 if '.hgtags' not in self.dirstate:
552 self[None].add(['.hgtags'])
552 self[None].add(['.hgtags'])
553
553
554 m = matchmod.exact(self.root, '', ['.hgtags'])
554 m = matchmod.exact(self.root, '', ['.hgtags'])
555 tagnode = self.commit(message, user, date, extra=extra, match=m,
555 tagnode = self.commit(message, user, date, extra=extra, match=m,
556 editor=editor)
556 editor=editor)
557
557
558 for name in names:
558 for name in names:
559 self.hook('tag', node=hex(node), tag=name, local=local)
559 self.hook('tag', node=hex(node), tag=name, local=local)
560
560
561 return tagnode
561 return tagnode
562
562
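For reference, the writetags() helper inside _tag produces the plain .hgtags/localtags layout: one '<40-digit hex node> <tag>' pair per line, with later lines overriding earlier ones. On re-tag the previous node is written first so tag history merges cleanly, and the null node marks a removed tag (the nodes below are made up):

    0123456789abcdef0123456789abcdef01234567 v1.0
    89abcdef0123456789abcdef0123456789abcdef v1.0
    0000000000000000000000000000000000000000 v1.0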
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

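    # A minimal usage sketch for tag(), assuming an already-opened 'repo'
    # object and an existing changeset node 'node' (both hypothetical here):
    #
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
    #            user='alice', date=None)
    #
    # With local=False this writes .hgtags and creates a new changeset;
    # with local=True it only appends to .hg/localtags.
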
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

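    # Sketch of the encoding round-trip performed above (names invented):
    # the tags module stores names in UTF-8, so _findtags() converts them
    # back with encoding.tolocal() before handing them to callers, e.g.
    #
    #   utf8name = encoding.fromlocal(localname)   # cache/storage side
    #   localname = encoding.tolocal(utf8name)     # what callers see
    #
    # Only 'tip' is synthesized here; every other entry comes from .hgtags
    # or .hg/localtags.
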
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

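    # known() answers discovery queries; a secret changeset is reported as
    # unknown on purpose so peers never try to pull it. A hypothetical
    # session (node names invented):
    #
    #   repo.known([public_node, secret_node, missing_node])
    #   # -> [True, False, False]
    #
    # i.e. only non-secret nodes present in the changelog yield True.
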
    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f, *insidef):
        return os.path.join(self.path, f, *insidef)

    def wjoin(self, f, *insidef):
        return os.path.join(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

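    # Sketch of what setparents() does with copy records when a merge is
    # dropped to a single parent (file names hypothetical):
    #
    #   repo.setparents(p1)
    #   # a record "b copied from a" survives only if 'a' exists in the
    #   # new first parent and 'b' does not; records whose source and
    #   # target are both missing from p1 are cleared so the dirstate
    #   # never points at an unreachable copy source.
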
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

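    # _loadfilter() reads pattern/command pairs from the [encode] and
    # [decode] config sections. A hypothetical hgrc illustrating the shape
    # of those entries (the 'cleverencode:'/'cleverdecode:' names assume
    # the win32text extension is enabled):
    #
    #   [encode]
    #   **.txt = cleverencode:
    #   [decode]
    #   **.txt = cleverdecode:
    #
    # Commands that do not match a registered data filter are run through
    # util.filter() as shell pipes instead.
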
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

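    # A minimal caller-side sketch of the expected pairing (all names local
    # to the example):
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...                     # write to the store through tr
    #           tr.close()              # commit the transaction
    #       finally:
    #           tr.release()            # rolls back if close() never ran
    #   finally:
    #       lock.release()
    #
    # Nested calls while a transaction is running return tr.nest(), so the
    # outermost close()/release() pair decides the fate of the whole group.
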
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

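    # The journal.* files written above are what 'hg recover' replays and,
    # after a successful close, aftertrans() renames them to their undo.*
    # counterparts for 'hg rollback', e.g.:
    #
    #   .hg/store/journal        -> .hg/store/undo
    #   .hg/journal.dirstate     -> .hg/undo.dirstate
    #   .hg/journal.branch       -> .hg/undo.branch
    #
    # (mapping shown for illustration; see _journalfiles()/undofiles()).
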
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

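    # Lock-ordering sketch: callers that need both locks must take wlock()
    # before lock() to avoid an ABBA deadlock (names local to the example):
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...                        # touch dirstate and store
    #   finally:
    #       release(lock, wlock)       # release in reverse order
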
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

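    # Sketch of the rename metadata recorded above when a copy is detected
    # (values invented): the filelog revision is stored with
    #
    #   meta = {'copy': 'foo', 'copyrev': '0123...abcdef'}
    #
    # and fparent1 set to nullid, which is the "look up the copy data"
    # signal described in the comment block in _filecommit().
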
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, status)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

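    # A minimal usage sketch for commit(), assuming an open 'repo' with
    # pending working-directory changes (all values hypothetical):
    #
    #   node = repo.commit(text='fix parser', user='alice',
    #                      date='2014-11-01 12:00')
    #   # returns None when there was nothing to commit
    #
    # Passing match restricts the committed files; force=True allows
    # otherwise-empty commits.
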
1381 @unfilteredmethod
1381 @unfilteredmethod
1382 def commitctx(self, ctx, error=False):
1382 def commitctx(self, ctx, error=False):
1383 """Add a new revision to current repository.
1383 """Add a new revision to current repository.
1384 Revision information is passed via the context argument.
1384 Revision information is passed via the context argument.
1385 """
1385 """
1386
1386
1387 tr = None
1387 tr = None
1388 p1, p2 = ctx.p1(), ctx.p2()
1388 p1, p2 = ctx.p1(), ctx.p2()
1389 user = ctx.user()
1389 user = ctx.user()
1390
1390
1391 lock = self.lock()
1391 lock = self.lock()
1392 try:
1392 try:
1393 tr = self.transaction("commit")
1393 tr = self.transaction("commit")
1394 trp = weakref.proxy(tr)
1394 trp = weakref.proxy(tr)
1395
1395
1396 if ctx.files():
1396 if ctx.files():
1397 m1 = p1.manifest()
1397 m1 = p1.manifest()
1398 m2 = p2.manifest()
1398 m2 = p2.manifest()
1399 m = m1.copy()
1399 m = m1.copy()
1400
1400
1401 # check in files
1401 # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
-           self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

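A note on the `pending` callback above: `tr.writepending()` flushes the in-memory changelog data to a pending file so the `pretxncommit` hook can inspect the not-yet-committed revision, and `X and A or B` is the pre-`if/else`-expression Python conditional idiom. A minimal hedged sketch of that idiom, with illustrative names rather than the real transaction API:

# The 'cond and a or b' idiom only behaves like 'a if cond else b' when a
# is truthy, which holds here because self.root is a non-empty path.
def pending_arg(writepending, root):
    # mirrors: p = lambda: tr.writepending() and self.root or ""
    return writepending() and root or ""

assert pending_arg(lambda: True, "/repo") == "/repo"   # pending data written
assert pending_arg(lambda: False, "/repo") == ""       # nothing pending
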
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

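The filter-then-write strategy in destroyed() keeps the on-disk phase data consistent after a strip. A hedged sketch of the same pattern with a toy cache (illustrative names only, not the real phasecache API):

# A toy cache that, like the phasecache here, drops entries for destroyed
# nodes before persisting (the real cache writes .hg/store/phaseroots).
class toycache(object):
    def __init__(self, nodes):
        self.nodes = set(nodes)
    def filterunknown(self, known):
        self.nodes &= set(known)          # discard nodes the repo lost
    def serialize(self):
        return '\n'.join(sorted(self.nodes))

c = toycache(['a1f', 'b2e', 'stripped0'])
c.filterunknown(['a1f', 'b2e'])           # 'stripped0' is gone from the repo
assert c.serialize() == 'a1f\nb2e'
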
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

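branchheads() narrows the cached candidate heads down to those reachable from `start` via changelog.nodesbetween. A hedged sketch of that reachability filter over a toy DAG (child-adjacency dict, not the real nodesbetween API):

# Keep only the candidate heads a descendant walk from 'start' can reach.
def reachable_heads(children, start, heads):
    seen, stack = set(), [start]
    while stack:
        n = stack.pop()
        if n in seen:
            continue
        seen.add(n)
        stack.extend(children.get(n, ()))
    return [h for h in heads if h in seen]

dag = {'a': ['b'], 'b': ['c', 'd']}
assert reachable_heads(dag, 'b', ['c', 'd', 'x']) == ['c', 'd']
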
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

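branches() follows first parents from each starting node until it hits a merge or a root; each 4-tuple records the starting node, the stopping node, and the stop's two parents. A hedged sketch of the same walk over a toy parent map, with None standing in for nullid:

# Toy first-parent walk; 'parents' maps node -> (p1, p2).
def walk_to_branch_point(parents, start):
    t, n = start, start
    while True:
        p1, p2 = parents[n]
        if p2 is not None or p1 is None:   # merge or root: stop here
            return (t, n, p1, p2)
        n = p1

parents = {'c': ('b', None), 'b': ('a', None), 'a': (None, None)}
assert walk_to_branch_point(parents, 'c') == ('c', 'a', None, None)
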
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

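between() records nodes at exponentially growing distances (1, 2, 4, 8, ...) along the first-parent chain from top toward bottom, which lets the old discovery protocol probe a long chain with O(log n) samples. A hedged sketch of just the sampling rule:

# Keep elements whose position along the chain is a power of two.
def exponential_samples(chain):
    kept, f = [], 1
    for i, n in enumerate(chain):
        if i == f:
            kept.append(n)
            f *= 2
    return kept

assert exponential_samples(list(range(20))) == [1, 2, 4, 8, 16]
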
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

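prepushoutgoinghooks relies on util.hooks as a small multiplexer: callers register functions, and the container invokes each of them with the same arguments, any of which may raise to abort the push. A hedged sketch of that contract (illustrative, not the exact util.hooks implementation):

# Minimal hook multiplexer: every registered callable gets the same args.
class hookset(object):
    def __init__(self):
        self._hooks = []
    def add(self, source, fn):
        self._hooks.append((source, fn))
    def __call__(self, *args):
        for source, fn in sorted(self._hooks, key=lambda e: e[0]):
            fn(*args)

calls = []
hs = hookset()
hs.add('ext-a', lambda repo, remote, outgoing: calls.append('a'))
hs.add('ext-b', lambda repo, remote, outgoing: calls.append('b'))
hs(None, None, None)
assert calls == ['a', 'b']
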
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

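The stream body consumed above has simple framing: an integer status line, a "<files> <bytes>" summary line, then for each file a "<name>\0<size>" header followed by exactly <size> raw bytes. A hedged toy parser of that framing (not the real protocol code; no progress reporting, locking, or decodedir handling):

import io

def parse_stream(fp):
    # status line: 0 = ok, 1 = forbidden, 2 = remote lock failed
    if int(fp.readline().decode('ascii')) != 0:
        raise ValueError('server refused to stream')
    header = fp.readline().decode('ascii')
    total_files, total_bytes = map(int, header.split(' ', 1))
    # total_bytes only drives progress output in the real code
    files = {}
    for _ in range(total_files):
        name, size = fp.readline().rstrip(b'\n').split(b'\0', 1)
        files[name.decode('utf-8')] = fp.read(int(size.decode('ascii')))
    return files

blob = io.BytesIO(b'0\n1 3\nfoo\x003\nabc')
assert parse_stream(blob) == {'foo': b'abc'}
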
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

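The streaming decision in clone() reduces to a set check: stream only when every requirement the server advertises in 'streamreqs' is a format this client supports. A hedged sketch of just that check:

# True when the remote's advertised stream requirements are a subset of
# the local supported formats (so stream_in is safe to use).
def can_stream(streamreqs, supportedformats):
    reqs = set(streamreqs.split(','))
    return not reqs - set(supportedformats)

assert can_stream('revlogv1', ['revlogv1', 'generaldelta'])
assert not can_stream('revlogv1,exoticformat', ['revlogv1'])
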
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

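pushkey() (and listkeys() below) wrap the operation in a hook sandwich: a vetoing pre-hook, the operation itself, then an observing post-hook that receives the result. A hedged sketch of the pattern with illustrative names:

# Pre-hook may raise to veto; post-hook only observes the return value.
def hook_sandwich(prehook, operation, posthook):
    prehook()
    ret = operation()
    posthook(ret)
    return ret

log = []
result = hook_sandwich(lambda: log.append('pre'),
                       lambda: 42,
                       lambda ret: log.append(('post', ret)))
assert result == 42 and log == ['pre', ('post', 42)]
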
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

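aftertrans() deliberately snapshots the file tuples and returns a plain closure: the callback holds no reference back to the repository, so transaction objects stay free of reference cycles and their destructors run promptly. A hedged sketch of the same capture-only-what-you-need pattern, using os.rename in place of the vfs:

import os

# Snapshot the (src, dest) pairs so the callback owns only immutable data;
# capturing a repo or transaction here would create a reference cycle.
def make_renamer(pairs):
    pairs = [tuple(t) for t in pairs]
    def rename_all():
        for src, dest in pairs:
            try:
                os.rename(src, dest)
            except OSError:   # source may not exist yet, like the journal
                pass
    return rename_all

cb = make_renamer([('journal.tmp', 'undo.tmp')])
cb()   # safe even when 'journal.tmp' was never created
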
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True