phase: add a transaction argument to retractboundary...
Pierre-Yves David
r22070:c1ca4720 default

The requested changes are too big and content was truncated; only a partial diff is shown below.
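This commit threads the repository's active transaction into phases.retractboundary: the function itself (in phases.py, whose diff is part of the truncated content) gains a tr parameter, and the hunk below updates the two call sites in changegroup.py to pass it. A minimal caller-side sketch in Python, assuming only the new signature retractboundary(repo, tr, targetphase, nodes) implied by the call sites below; the transaction description and the nodes variable are illustrative, not part of the diff:

    # Hedged sketch, not part of this diff: a caller opens a transaction
    # and hands it to retractboundary so the phase movement is recorded
    # against that transaction.
    tr = repo.transaction('add-changegroup-example')
    try:
        # force these nodes down to the draft phase within the transaction
        phases.retractboundary(repo, tr, phases.draft, nodes)
        tr.close()
    finally:
        tr.release()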
changegroup.py (modified):

@@ -1,756 +1,756 @@
 # changegroup.py - Mercurial changegroup manipulation functions
 #
 # Copyright 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import weakref
 from i18n import _
 from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
 import discovery, error, phases, branchmap

 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"

 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if less was available'''
     s = stream.read(n)
     if len(s) < n:
         raise util.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
     return s

 def getchunk(stream):
     """return the next chunk from stream as a string"""
     d = readexactly(stream, 4)
     l = struct.unpack(">l", d)[0]
     if l <= 4:
         if l:
             raise util.Abort(_("invalid chunk length %d") % l)
         return ""
     return readexactly(stream, l - 4)

 def chunkheader(length):
     """return a changegroup chunk header (string)"""
     return struct.pack(">l", length + 4)

 def closechunk():
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)

 class nocompress(object):
     def compress(self, x):
         return x
     def flush(self):
         return ""

 bundletypes = {
     "": ("", nocompress), # only when using unbundle on ssh and old http servers
                           # since the unification ssh accepts a header but there
                           # is no capability signaling it.
     "HG10UN": ("HG10UN", nocompress),
     "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
     "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
 }

 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

 def writebundle(cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.

     Existing files will not be overwritten.
     If no filename is specified, a temporary file is created.
     bz2 compression can be turned off.
     The bundle file will be deleted in case of errors.
     """

     fh = None
     cleanup = None
     try:
         if filename:
             if vfs:
                 fh = vfs.open(filename, "wb")
             else:
                 fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
         cleanup = filename

         header, compressor = bundletypes[bundletype]
         fh.write(header)
         z = compressor()

         # parse the changegroup data, otherwise we will block
         # in case of sshrepo because we don't know the end of the stream

         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         for chunk in cg.getchunks():
             fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
     finally:
         if fh is not None:
             fh.close()
         if cleanup is not None:
             if filename and vfs:
                 vfs.unlink(cleanup)
             else:
                 os.unlink(cleanup)

 def decompressor(fh, alg):
     if alg == 'UN':
         return fh
     elif alg == 'GZ':
         def generator(f):
             zd = zlib.decompressobj()
             for chunk in util.filechunkiter(f):
                 yield zd.decompress(chunk)
     elif alg == 'BZ':
         def generator(f):
             zd = bz2.BZ2Decompressor()
             zd.decompress("BZ")
             for chunk in util.filechunkiter(f, 4096):
                 yield zd.decompress(chunk)
     else:
         raise util.Abort("unknown bundle compression '%s'" % alg)
     return util.chunkbuffer(generator(fh))

 class unbundle10(object):
     deltaheader = _BUNDLE10_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     def __init__(self, fh, alg):
         self._stream = decompressor(fh, alg)
         self._type = alg
         self.callback = None
     def compressed(self):
         return self._type != 'UN'
     def read(self, l):
         return self._stream.read(l)
     def seek(self, pos):
         return self._stream.seek(pos)
     def tell(self):
         return self._stream.tell()
     def close(self):
         return self._stream.close()

     def chunklength(self):
         d = readexactly(self._stream, 4)
         l = struct.unpack(">l", d)[0]
         if l <= 4:
             if l:
                 raise util.Abort(_("invalid chunk length %d") % l)
             return 0
         if self.callback:
             self.callback()
         return l - 4

     def changelogheader(self):
         """v10 does not have a changelog header chunk"""
         return {}

     def manifestheader(self):
         """v10 does not have a manifest header chunk"""
         return {}

     def filelogheader(self):
         """return the header of the filelogs chunk, v10 only has the filename"""
         l = self.chunklength()
         if not l:
             return {}
         fname = readexactly(self._stream, l)
         return {'filename': fname}

     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
         if prevnode is None:
             deltabase = p1
         else:
             deltabase = prevnode
         return node, p1, p2, deltabase, cs

     def deltachunk(self, prevnode):
         l = self.chunklength()
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
         return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                 'deltabase': deltabase, 'delta': delta}

     def getchunks(self):
         """returns all the chunks contains in the bundle

         Used when you need to forward the binary stream to a file or another
         network API. To do so, it parse the changegroup data, otherwise it will
         block in case of sshrepo because it don't know the end of the stream.
         """
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         empty = False
         count = 0
         while not empty or count <= 2:
             empty = True
             count += 1
             while True:
                 chunk = getchunk(self)
                 if not chunk:
                     break
                 empty = False
                 yield chunkheader(len(chunk))
                 pos = 0
                 while pos < len(chunk):
                     next = pos + 2**20
                     yield chunk[pos:next]
                     pos = next
             yield closechunk()

 class headerlessfixup(object):
     def __init__(self, fh, h):
         self._h = h
         self._fh = fh
     def read(self, n):
         if self._h:
             d, self._h = self._h[:n], self._h[n:]
             if len(d) < n:
                 d += readexactly(self._fh, n - len(d))
             return d
         return readexactly(self._fh, n)

 class bundle10(object):
     deltaheader = _BUNDLE10_DELTA_HEADER
     def __init__(self, repo, bundlecaps=None):
         """Given a source repo, construct a bundler.

         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle.
         """
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
         self._changelog = repo.changelog
         self._manifest = repo.manifest
         reorder = repo.ui.config('bundle', 'reorder', 'auto')
         if reorder == 'auto':
             reorder = None
         else:
             reorder = util.parsebool(reorder)
         self._repo = repo
         self._reorder = reorder
         self._progress = repo.ui.progress
     def close(self):
         return closechunk()

     def fileheader(self, fname):
         return chunkheader(len(fname)) + fname

     def group(self, nodelist, revlog, lookup, units=None, reorder=None):
         """Calculate a delta group, yielding a sequence of changegroup chunks
         (strings).

         Given a list of changeset revs, return a set of deltas and
         metadata corresponding to nodes. The first delta is
         first parent(nodelist[0]) -> nodelist[0], the receiver is
         guaranteed to have this parent as it has all history before
         these changesets. In the case firstparent is nullrev the
         changegroup starts with a full revision.

         If units is not None, progress detail will be generated, units specifies
         the type of revlog that is touched (changelog, manifest, etc.).
         """
         # if we don't have any revisions touched by these changesets, bail
         if len(nodelist) == 0:
             yield self.close()
             return

         # for generaldelta revlogs, we linearize the revs; this will both be
         # much quicker and generate a much smaller bundle
         if (revlog._generaldelta and reorder is not False) or reorder:
             dag = dagutil.revlogdag(revlog)
             revs = set(revlog.rev(n) for n in nodelist)
             revs = dag.linearize(revs)
         else:
             revs = sorted([revlog.rev(n) for n in nodelist])

         # add the parent of the first rev
         p = revlog.parentrevs(revs[0])[0]
         revs.insert(0, p)

         # build deltas
         total = len(revs) - 1
         msgbundling = _('bundling')
         for r in xrange(len(revs) - 1):
             if units is not None:
                 self._progress(msgbundling, r + 1, unit=units, total=total)
             prev, curr = revs[r], revs[r + 1]
             linknode = lookup(revlog.node(curr))
             for c in self.revchunk(revlog, curr, prev, linknode):
                 yield c

         yield self.close()

     # filter any nodes that claim to be part of the known set
     def prune(self, revlog, missing, commonrevs, source):
         rr, rl = revlog.rev, revlog.linkrev
         return [n for n in missing if rl(rr(n)) not in commonrevs]

     def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
         '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = self._changelog
         mf = self._manifest
         reorder = self._reorder
         progress = self._progress

         # for progress output
         msgbundling = _('bundling')

         mfs = {} # needed manifests
         fnodes = {} # needed file nodes
         changedfiles = set()

         # Callback for the changelog, used to collect changed files and manifest
         # nodes.
         # Returns the linkrev node (identity in the changelog case).
         def lookupcl(x):
             c = cl.read(x)
             changedfiles.update(c[3])
             # record the first changeset introducing this manifest version
             mfs.setdefault(c[0], x)
             return x

         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
         def lookupmf(x):
             clnode = mfs[x]
             if not fastpathlinkrev:
                 mdata = mf.readfast(x)
                 for f, n in mdata.iteritems():
                     if f in changedfiles:
                         # record the first changeset introducing this filelog
                         # version
                         fnodes[f].setdefault(n, clnode)
             return clnode

         for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)

         for f in changedfiles:
             fnodes[f] = {}
         mfnodes = self.prune(mf, mfs, commonrevs, source)
         for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)

         mfs.clear()
         needed = set(cl.rev(x) for x in clnodes)

         def linknodes(filerevlog, fname):
             if fastpathlinkrev:
                 llr = filerevlog.linkrev
                 def genfilenodes():
                     for r in filerevlog:
                         linkrev = llr(r)
                         if linkrev in needed:
                             yield filerevlog.node(r), cl.node(linkrev)
                 fnodes[fname] = dict(genfilenodes())
             return fnodes.get(fname, {})

         for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                         source):
             yield chunk

         yield self.close()
         progress(msgbundling, None)

         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)

     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
         progress = self._progress
         reorder = self._reorder
         msgbundling = _('bundling')

         total = len(changedfiles)
         # for progress output
         msgfiles = _('files')
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
                 raise util.Abort(_("empty or missing revlog for %s") % fname)

             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]

             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
             if filenodes:
                 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                          total=total)
                 yield self.fileheader(fname)
                 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                         reorder=reorder):
                     yield chunk

     def revchunk(self, revlog, rev, prev, linknode):
         node = revlog.node(rev)
         p1, p2 = revlog.parentrevs(rev)
         base = prev

         prefix = ''
         if base == nullrev:
             delta = revlog.revision(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = revlog.revdiff(base, rev)
         p1n, p2n = revlog.parents(node)
         basenode = revlog.node(base)
         meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
         meta += prefix
         l = len(meta) + len(delta)
         yield chunkheader(l)
         yield meta
         yield delta
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
         repo.ui.status(_("%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
         repo.ui.debug("list of changesets:\n")
         for node in nodes:
             repo.ui.debug("%s\n" % hex(node))

 def getsubset(repo, outgoing, bundler, source, fastpath=False):
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
     heads = outgoing.missingheads
     # We go through the fast path if we get told to, or if all (unfiltered
     # heads have been requested (since we then know there all linkrevs will
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
         repo.filtername is None and heads == sorted(repo.heads()))

     repo.hook('preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
     return unbundle10(util.chunkbuffer(gengroup), 'UN')

 def changegroupsubset(repo, roots, heads, source):
     """Compute a changegroup consisting of all the nodes that are
     descendants of any of the roots and ancestors of any of the heads.
     Return a chunkbuffer object whose read() method will return
     successive changegroup chunks.

     It is fairly complex as determining which filenodes and which
     manifest nodes need to be included for the changeset to be complete
     is non-trivial.

     Another wrinkle is doing the reverse, figuring out which changeset in
     the changegroup a particular filenode or manifestnode belongs to.
     """
     cl = repo.changelog
     if not roots:
         roots = [nullid]
     # TODO: remove call to nodesbetween.
     csets, roots, heads = cl.nodesbetween(roots, heads)
     discbases = []
     for n in roots:
         discbases.extend([p for p in cl.parents(n) if p != nullid])
     outgoing = discovery.outgoing(cl, discbases, heads)
     bundler = bundle10(repo)
     return getsubset(repo, outgoing, bundler, source)

 def getlocalbundle(repo, source, outgoing, bundlecaps=None):
     """Like getbundle, but taking a discovery.outgoing as an argument.

     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing."""
     if not outgoing.missing:
         return None
     bundler = bundle10(repo, bundlecaps)
     return getsubset(repo, outgoing, bundler, source)

 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
     and a set of heads.

     This is a separate function so extensions can have access to
     the logic.

     Returns a discovery.outgoing object.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(cl, common, heads)

 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors common.

     If heads is None, use the local heads. If common is None, use [nullid].

     The nodes in common might not all be known locally due to the way the
     current discovery protocol works.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)

 def changegroup(repo, basenodes, source):
     # to avoid a race we use changegroupsubset() (issue1320)
     return changegroupsubset(repo, basenodes, repo.heads(), source)

 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
     revisions = 0
     files = 0
     while True:
         chunkdata = source.filelogheader()
         if not chunkdata:
             break
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
         pr()
         fl = repo.file(f)
         o = len(fl)
         if not fl.addgroup(source, revmap, trp):
             raise util.Abort(_("received file revlog group is empty"))
         revisions += len(fl) - o
         files += 1
         if f in needfiles:
             needs = needfiles[f]
             for new in xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
                 else:
                     raise util.Abort(
                         _("received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     repo.ui.progress(_('files'), None)

     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise util.Abort(
                     _('missing file data for %s:%s - run hg verify') %
                     (f, hex(n)))

     return revisions, files

 def addchangegroup(repo, source, srctype, url, emptyok=False,
                    targetphase=phases.draft):
     """Add the changegroup returned by source.read() to this repo.
     srctype is a string like 'push', 'pull', or 'unbundle'. url is
     the URL of the repo where this changegroup is coming from.

     Return an integer summarizing the change to this repo:
     - nothing changed or no source: 0
     - more heads than before: 1+added heads (2..n)
     - fewer heads than before: -1-removed heads (-2..-n)
     - number of heads stays the same: 1
     """
     repo = repo.unfiltered()
     def csmap(x):
         repo.ui.debug("add changeset %s\n" % short(x))
         return len(cl)

     def revmap(x):
         return cl.rev(x)

     if not source:
         return 0

     repo.hook('prechangegroup', throw=True, source=srctype, url=url)

     changesets = files = revisions = 0
     efiles = set()

     # write changelog data to temp files so concurrent readers will not see
     # inconsistent view
     cl = repo.changelog
     cl.delayupdate()
     oldheads = cl.heads()

     tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
     try:
         trp = weakref.proxy(tr)
         # pull off the changeset group
         repo.ui.status(_("adding changesets\n"))
         clstart = len(cl)
         class prog(object):
             step = _('changesets')
             count = 1
             ui = repo.ui
             total = None
             def __call__(repo):
                 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                  total=repo.total)
                 repo.count += 1
         pr = prog()
         source.callback = pr

         source.changelogheader()
         srccontent = cl.addgroup(source, csmap, trp)
         if not (srccontent or emptyok):
             raise util.Abort(_("received changelog group is empty"))
         clend = len(cl)
         changesets = clend - clstart
         for c in xrange(clstart, clend):
             efiles.update(repo[c].files())
         efiles = len(efiles)
         repo.ui.progress(_('changesets'), None)

         # pull off the manifest group
         repo.ui.status(_("adding manifests\n"))
         pr.step = _('manifests')
         pr.count = 1
         pr.total = changesets # manifests <= changesets
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         source.manifestheader()
         repo.manifest.addgroup(source, revmap, trp)
         repo.ui.progress(_('manifests'), None)

         needfiles = {}
         if repo.ui.configbool('server', 'validate', default=False):
             # validate incoming csets have their manifests
             for cset in xrange(clstart, clend):
                 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                 mfest = repo.manifest.readdelta(mfest)
                 # store file nodes we must see
                 for f, n in mfest.iteritems():
                     needfiles.setdefault(f, set()).add(n)

         # process the files
         repo.ui.status(_("adding file changes\n"))
         pr.step = _('files')
         pr.count = 1
         pr.total = efiles
         source.callback = None

         newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                 needfiles)
         revisions += newrevs
         files += newfiles

         dh = 0
         if oldheads:
             heads = cl.heads()
             dh = len(heads) - len(oldheads)
             for h in heads:
                 if h not in oldheads and repo[h].closesbranch():
                     dh -= 1
         htext = ""
         if dh:
             htext = _(" (%+d heads)") % dh

         repo.ui.status(_("added %d changesets"
                          " with %d changes to %d files%s\n")
                        % (changesets, revisions, files, htext))
         repo.invalidatevolatilesets()

         if changesets > 0:
             p = lambda: cl.writepending() and repo.root or ""
             if 'node' not in tr.hookargs:
                 tr.hookargs['node'] = hex(cl.node(clstart))
             repo.hook('pretxnchangegroup', throw=True, source=srctype,
                       url=url, pending=p, **tr.hookargs)

         added = [cl.node(r) for r in xrange(clstart, clend)]
         publishing = repo.ui.configbool('phases', 'publish', True)
         if srctype in ('push', 'serve'):
             # Old servers can not push the boundary themselves.
             # New servers won't push the boundary if changeset already
             # exists locally as secret
             #
             # We should not use added here but the list of all change in
             # the bundle
             if publishing:
                 phases.advanceboundary(repo, tr, phases.public, srccontent)
             else:
                 # Those changesets have been pushed from the outside, their
                 # phases are going to be pushed alongside. Therefor
                 # `targetphase` is ignored.
                 phases.advanceboundary(repo, tr, phases.draft, srccontent)
-                phases.retractboundary(repo, phases.draft, added)
+                phases.retractboundary(repo, tr, phases.draft, added)
         elif srctype != 'strip':
             # publishing only alter behavior during push
             #
             # strip should not touch boundary at all
-            phases.retractboundary(repo, targetphase, added)
+            phases.retractboundary(repo, tr, targetphase, added)

         # make changelog see real files again
         cl.finalize(trp)

         tr.close()

         if changesets > 0:
             if srctype != 'strip':
                 # During strip, branchcache is invalid but coming call to
                 # `destroyed` will repair it.
                 # In other case we can safely update cache on disk.
                 branchmap.updatecache(repo.filtered('served'))
             def runhooks():
                 # These hooks run when the lock releases, not when the
                 # transaction closes. So it's possible for the changelog
                 # to have changed since we last saw it.
                 if clstart >= len(repo):
                     return

                 # forcefully update the on-disk branch cache
                 repo.ui.debug("updating the branch cache\n")
                 repo.hook("changegroup", source=srctype, url=url,
                           **tr.hookargs)

                 for n in added:
                     repo.hook("incoming", node=hex(n), source=srctype,
                               url=url)

                 newheads = [h for h in repo.heads() if h not in oldheads]
                 repo.ui.log("incoming",
                             "%s incoming changes - new heads: %s\n",
                             len(added),
                             ', '.join([hex(c[:6]) for c in newheads]))
             repo._afterlock(runhooks)

     finally:
         tr.release()
     # never return 0 here:
     if dh < 0:
         return dh - 1
     else:
         return dh + 1
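The callee side of the change is not visible in this truncated diff. As a hedged sketch of what the new phases.py entry point plausibly looks like after the change, modeled on the advanceboundary(repo, tr, ...) calls visible above; the body is an assumption, not the actual implementation:

    # Hypothetical post-change signature in phases.py. `tr` lets the phase
    # movement participate in the caller's open transaction (for example,
    # so it is undone if the transaction is rolled back).
    def retractboundary(repo, tr, targetphase, nodes):
        # copy-modify-replace the phase cache so concurrent readers never
        # observe a half-updated phase state
        phcache = repo._phasecache.copy()
        phcache.retractboundary(repo, tr, targetphase, nodes)
        repo._phasecache.replace(phcache)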
localrepo.py (modified; diff truncated):
@@ -1,1783 +1,1783 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 propertycache = util.propertycache
21 propertycache = util.propertycache
22 filecache = scmutil.filecache
22 filecache = scmutil.filecache
23
23
24 class repofilecache(filecache):
24 class repofilecache(filecache):
25 """All filecache usage on repo are done for logic that should be unfiltered
25 """All filecache usage on repo are done for logic that should be unfiltered
26 """
26 """
27
27
28 def __get__(self, repo, type=None):
28 def __get__(self, repo, type=None):
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 def __set__(self, repo, value):
30 def __set__(self, repo, value):
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 def __delete__(self, repo):
32 def __delete__(self, repo):
33 return super(repofilecache, self).__delete__(repo.unfiltered())
33 return super(repofilecache, self).__delete__(repo.unfiltered())
34
34
35 class storecache(repofilecache):
35 class storecache(repofilecache):
36 """filecache for files in the store"""
36 """filecache for files in the store"""
37 def join(self, obj, fname):
37 def join(self, obj, fname):
38 return obj.sjoin(fname)
38 return obj.sjoin(fname)
39
39
40 class unfilteredpropertycache(propertycache):
40 class unfilteredpropertycache(propertycache):
41 """propertycache that apply to unfiltered repo only"""
41 """propertycache that apply to unfiltered repo only"""
42
42
43 def __get__(self, repo, type=None):
43 def __get__(self, repo, type=None):
44 unfi = repo.unfiltered()
44 unfi = repo.unfiltered()
45 if unfi is repo:
45 if unfi is repo:
46 return super(unfilteredpropertycache, self).__get__(unfi)
46 return super(unfilteredpropertycache, self).__get__(unfi)
47 return getattr(unfi, self.name)
47 return getattr(unfi, self.name)
48
48
49 class filteredpropertycache(propertycache):
49 class filteredpropertycache(propertycache):
50 """propertycache that must take filtering in account"""
50 """propertycache that must take filtering in account"""
51
51
52 def cachevalue(self, obj, value):
52 def cachevalue(self, obj, value):
53 object.__setattr__(obj, self.name, value)
53 object.__setattr__(obj, self.name, value)
54
54
55
55
56 def hasunfilteredcache(repo, name):
56 def hasunfilteredcache(repo, name):
57 """check if a repo has an unfilteredpropertycache value for <name>"""
57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 return name in vars(repo.unfiltered())
58 return name in vars(repo.unfiltered())
59
59
60 def unfilteredmethod(orig):
60 def unfilteredmethod(orig):
61 """decorate method that always need to be run on unfiltered version"""
61 """decorate method that always need to be run on unfiltered version"""
62 def wrapper(repo, *args, **kwargs):
62 def wrapper(repo, *args, **kwargs):
63 return orig(repo.unfiltered(), *args, **kwargs)
63 return orig(repo.unfiltered(), *args, **kwargs)
64 return wrapper
64 return wrapper
65
65
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

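# Usage sketch (illustrative, not part of the original file): a localpeer is
# normally obtained through localrepository.peer() further below, rather than
# constructed directly:
#   peer = repo.peer()          # fresh localpeer over the 'served' view
#   node = peer.lookup('tip')   # delegates to the filtered repo
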
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': (),
                   'b2x:listkeys': (),
                   'b2x:pushkey': (),
                   'b2x:changegroup': (),
                  }

    # A set of (ui, featureset) functions.
    # Only functions defined in the modules of enabled extensions are invoked.
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data was found.
        # Callbacks are of the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

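    # Resulting file sketch: .hg/requires is plain text with one requirement
    # per (sorted) line; for a default repo created above it would read
    # roughly:
    #   dotencode
    #   fncache
    #   revlogv1
    #   store
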
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

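    # Why the '@' split above (sketch): divergent bookmarks carry a suffix
    # after '@' (e.g. 'feature@remote'), so bookmarkheads('feature') gathers
    # the nodes of 'feature' together with every 'feature@...' variant.
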
    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

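    # Usage sketch (the revset expression and format code are illustrative):
    #   for ctx in repo.set('heads(%r)', somerevsetstr):
    #       ui.note(ctx.hex())
    # revset.formatspec escapes each argument, so callers need not quote.
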
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

613
613
614 def tags(self):
614 def tags(self):
615 '''return a mapping of tag to node'''
615 '''return a mapping of tag to node'''
616 t = {}
616 t = {}
617 if self.changelog.filteredrevs:
617 if self.changelog.filteredrevs:
618 tags, tt = self._findtags()
618 tags, tt = self._findtags()
619 else:
619 else:
620 tags = self._tagscache.tags
620 tags = self._tagscache.tags
621 for k, v in tags.iteritems():
621 for k, v in tags.iteritems():
622 try:
622 try:
623 # ignore tags to unknown nodes
623 # ignore tags to unknown nodes
624 self.changelog.rev(v)
624 self.changelog.rev(v)
625 t[k] = v
625 t[k] = v
626 except (error.LookupError, ValueError):
626 except (error.LookupError, ValueError):
627 pass
627 pass
628 return t
628 return t
629
629
630 def _findtags(self):
630 def _findtags(self):
631 '''Do the hard work of finding tags. Return a pair of dicts
631 '''Do the hard work of finding tags. Return a pair of dicts
632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
633 maps tag name to a string like \'global\' or \'local\'.
633 maps tag name to a string like \'global\' or \'local\'.
634 Subclasses or extensions are free to add their own tags, but
634 Subclasses or extensions are free to add their own tags, but
635 should be aware that the returned dicts will be retained for the
635 should be aware that the returned dicts will be retained for the
636 duration of the localrepo object.'''
636 duration of the localrepo object.'''
637
637
638 # XXX what tagtype should subclasses/extensions use? Currently
638 # XXX what tagtype should subclasses/extensions use? Currently
639 # mq and bookmarks add tags, but do not set the tagtype at all.
639 # mq and bookmarks add tags, but do not set the tagtype at all.
640 # Should each extension invent its own tag type? Should there
640 # Should each extension invent its own tag type? Should there
641 # be one tagtype for all such "virtual" tags? Or is the status
641 # be one tagtype for all such "virtual" tags? Or is the status
642 # quo fine?
642 # quo fine?
643
643
644 alltags = {} # map tag name to (node, hist)
644 alltags = {} # map tag name to (node, hist)
645 tagtypes = {}
645 tagtypes = {}
646
646
647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
649
649
650 # Build the return dicts. Have to re-encode tag names because
650 # Build the return dicts. Have to re-encode tag names because
651 # the tags module always uses UTF-8 (in order not to lose info
651 # the tags module always uses UTF-8 (in order not to lose info
652 # writing to the cache), but the rest of Mercurial wants them in
652 # writing to the cache), but the rest of Mercurial wants them in
653 # local encoding.
653 # local encoding.
654 tags = {}
654 tags = {}
655 for (name, (node, hist)) in alltags.iteritems():
655 for (name, (node, hist)) in alltags.iteritems():
656 if node != nullid:
656 if node != nullid:
657 tags[encoding.tolocal(name)] = node
657 tags[encoding.tolocal(name)] = node
658 tags['tip'] = self.changelog.tip()
658 tags['tip'] = self.changelog.tip()
659 tagtypes = dict([(encoding.tolocal(name), value)
659 tagtypes = dict([(encoding.tolocal(name), value)
660 for (name, value) in tagtypes.iteritems()])
660 for (name, value) in tagtypes.iteritems()])
661 return (tags, tagtypes)
661 return (tags, tagtypes)
662
662
663 def tagtype(self, tagname):
663 def tagtype(self, tagname):
664 '''
664 '''
665 return the type of the given tag. result can be:
665 return the type of the given tag. result can be:
666
666
667 'local' : a local tag
667 'local' : a local tag
668 'global' : a global tag
668 'global' : a global tag
669 None : tag does not exist
669 None : tag does not exist
670 '''
670 '''
671
671
672 return self._tagscache.tagtypes.get(tagname)
672 return self._tagscache.tagtypes.get(tagname)
673
673
674 def tagslist(self):
674 def tagslist(self):
675 '''return a list of tags ordered by revision'''
675 '''return a list of tags ordered by revision'''
676 if not self._tagscache.tagslist:
676 if not self._tagscache.tagslist:
677 l = []
677 l = []
678 for t, n in self.tags().iteritems():
678 for t, n in self.tags().iteritems():
679 r = self.changelog.rev(n)
679 r = self.changelog.rev(n)
680 l.append((r, t, n))
680 l.append((r, t, n))
681 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
681 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
682
682
683 return self._tagscache.tagslist
683 return self._tagscache.tagslist
684
684
685 def nodetags(self, node):
685 def nodetags(self, node):
686 '''return the tags associated with a node'''
686 '''return the tags associated with a node'''
687 if not self._tagscache.nodetagscache:
687 if not self._tagscache.nodetagscache:
688 nodetagscache = {}
688 nodetagscache = {}
689 for t, n in self._tagscache.tags.iteritems():
689 for t, n in self._tagscache.tags.iteritems():
690 nodetagscache.setdefault(n, []).append(t)
690 nodetagscache.setdefault(n, []).append(t)
691 for tags in nodetagscache.itervalues():
691 for tags in nodetagscache.itervalues():
692 tags.sort()
692 tags.sort()
693 self._tagscache.nodetagscache = nodetagscache
693 self._tagscache.nodetagscache = nodetagscache
694 return self._tagscache.nodetagscache.get(node, [])
694 return self._tagscache.nodetagscache.get(node, [])
695
695
696 def nodebookmarks(self, node):
696 def nodebookmarks(self, node):
697 marks = []
697 marks = []
698 for bookmark, n in self._bookmarks.iteritems():
698 for bookmark, n in self._bookmarks.iteritems():
699 if n == node:
699 if n == node:
700 marks.append(bookmark)
700 marks.append(bookmark)
701 return sorted(marks)
701 return sorted(marks)
702
702
703 def branchmap(self):
703 def branchmap(self):
704 '''returns a dictionary {branch: [branchheads]} with branchheads
704 '''returns a dictionary {branch: [branchheads]} with branchheads
705 ordered by increasing revision number'''
705 ordered by increasing revision number'''
706 branchmap.updatecache(self)
706 branchmap.updatecache(self)
707 return self._branchcaches[self.filtername]
707 return self._branchcaches[self.filtername]
708
708
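    # Shape sketch: the returned object acts like {branchname: [headnodes]},
    # e.g. roughly {'default': [n1, n2], 'stable': [n3]}, with each head list
    # ordered by increasing revision number (per the docstring above).
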
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

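    # Config sketch: _loadfilter reads pattern/command pairs from an hgrc
    # section such as [encode] or [decode] (command names illustrative):
    #   [encode]
    #   **.txt = cleanup-crlf
    #   **.bin = !               # '!' disables filtering for this pattern
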
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

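    # Lifecycle sketch: on successful close, aftertrans(renames) moves each
    # journal.* file to its undo.* counterpart (via undoname); those undo
    # files are what rollback() and undofiles() later consume, while a
    # leftover 'journal' in the store marks an interrupted transaction
    # that recover() can clean up.
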
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

1055 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1055 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1056 try:
1056 try:
1057 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1057 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1058 except error.LockHeld, inst:
1058 except error.LockHeld, inst:
1059 if not wait:
1059 if not wait:
1060 raise
1060 raise
1061 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1061 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1062 (desc, inst.locker))
1062 (desc, inst.locker))
1063 # default to 600 seconds timeout
1063 # default to 600 seconds timeout
1064 l = lockmod.lock(vfs, lockname,
1064 l = lockmod.lock(vfs, lockname,
1065 int(self.ui.config("ui", "timeout", "600")),
1065 int(self.ui.config("ui", "timeout", "600")),
1066 releasefn, desc=desc)
1066 releasefn, desc=desc)
1067 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1067 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1068 if acquirefn:
1068 if acquirefn:
1069 acquirefn()
1069 acquirefn()
1070 return l
1070 return l
1071
1071
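The locking helper above fails fast and then retries with a configurable timeout. A minimal sketch of that retry pattern; trylock and LockHeld below are hypothetical stand-ins for lockmod.lock and error.LockHeld:

class LockHeld(Exception):
    pass

def acquire_with_timeout(trylock, wait=True, timeout=600):
    # First attempt uses timeout=0 so an already-held lock fails fast;
    # only if the caller is willing to wait do we retry with the long timeout.
    try:
        return trylock(0)
    except LockHeld:
        if not wait:
            raise
        return trylock(timeout)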
1072    def _afterlock(self, callback):
1073        """add a callback to the current repository lock.
1074
1075        The callback will be executed on lock release."""
1076        l = self._lockref and self._lockref()
1077        if l:
1078            l.postrelease.append(callback)
1079        else:
1080            callback()
1081
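_afterlock defers work until the active lock is released, falling back to running it immediately when no lock is held. A self-contained sketch of the same pattern, with a hypothetical lock object exposing a postrelease list:

import sys

class _Lock(object):
    def __init__(self):
        self.postrelease = []
    def release(self):
        for cb in self.postrelease:
            cb()   # deferred callbacks fire only once the lock is gone

l = _Lock()
l.postrelease.append(lambda: sys.stdout.write('commit hook runs now\n'))
l.release()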
1082    def lock(self, wait=True):
1083        '''Lock the repository store (.hg/store) and return a weak reference
1084        to the lock. Use this before modifying the store (e.g. committing or
1085        stripping). If you are opening a transaction, get a lock as well.'''
1086        l = self._lockref and self._lockref()
1087        if l is not None and l.held:
1088            l.lock()
1089            return l
1090
1091        def unlock():
1092            if hasunfilteredcache(self, '_phasecache'):
1093                self._phasecache.write()
1094            for k, ce in self._filecache.items():
1095                if k == 'dirstate' or k not in self.__dict__:
1096                    continue
1097                ce.refresh()
1098
1099        l = self._lock(self.svfs, "lock", wait, unlock,
1100                       self.invalidate, _('repository %s') % self.origroot)
1101        self._lockref = weakref.ref(l)
1102        return l
1103
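Callers normally pair lock()/wlock() with try/finally, as commit() further down does. A minimal usage sketch, assuming repo is a localrepository instance:

lock = repo.lock()    # store lock, taken before touching .hg/store
try:
    pass              # e.g. open a transaction and write revisions
finally:
    lock.release()    # runs the unlock() callback defined above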
1104    def wlock(self, wait=True):
1105        '''Lock the non-store parts of the repository (everything under
1106        .hg except .hg/store) and return a weak reference to the lock.
1107        Use this before modifying files in .hg.'''
1108        l = self._wlockref and self._wlockref()
1109        if l is not None and l.held:
1110            l.lock()
1111            return l
1112
1113        def unlock():
1114            self.dirstate.write()
1115            self._filecache['dirstate'].refresh()
1116
1117        l = self._lock(self.vfs, "wlock", wait, unlock,
1118                       self.invalidatedirstate, _('working directory of %s') %
1119                       self.origroot)
1120        self._wlockref = weakref.ref(l)
1121        return l
1122
1123    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1124        """
1125        commit an individual file as part of a larger transaction
1126        """
1127
1128        fname = fctx.path()
1129        text = fctx.data()
1130        flog = self.file(fname)
1131        fparent1 = manifest1.get(fname, nullid)
1132        fparent2 = fparent2o = manifest2.get(fname, nullid)
1133
1134        meta = {}
1135        copy = fctx.renamed()
1136        if copy and copy[0] != fname:
1137            # Mark the new revision of this file as a copy of another
1138            # file.  This copy data will effectively act as a parent
1139            # of this new revision.  If this is a merge, the first
1140            # parent will be the nullid (meaning "look up the copy data")
1141            # and the second one will be the other parent.  For example:
1142            #
1143            # 0 --- 1 --- 3   rev1 changes file foo
1144            #   \       /     rev2 renames foo to bar and changes it
1145            #    \- 2 -/      rev3 should have bar with all changes and
1146            #                      should record that bar descends from
1147            #                      bar in rev2 and foo in rev1
1148            #
1149            # this allows this merge to succeed:
1150            #
1151            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
1152            #   \       /     merging rev3 and rev4 should use bar@rev2
1153            #    \- 2 --- 4        as the merge base
1154            #
1155
1156            cfname = copy[0]
1157            crev = manifest1.get(cfname)
1158            newfparent = fparent2
1159
1160            if manifest2: # branch merge
1161                if fparent2 == nullid or crev is None: # copied on remote side
1162                    if cfname in manifest2:
1163                        crev = manifest2[cfname]
1164                        newfparent = fparent1
1165
1166            # find source in nearest ancestor if we've lost track
1167            if not crev:
1168                self.ui.debug(" %s: searching for copy revision for %s\n" %
1169                              (fname, cfname))
1170                for ancestor in self[None].ancestors():
1171                    if cfname in ancestor:
1172                        crev = ancestor[cfname].filenode()
1173                        break
1174
1175            if crev:
1176                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1177                meta["copy"] = cfname
1178                meta["copyrev"] = hex(crev)
1179                fparent1, fparent2 = nullid, newfparent
1180            else:
1181                self.ui.warn(_("warning: can't find ancestor for '%s' "
1182                               "copied from '%s'!\n") % (fname, cfname))
1183
1184        elif fparent1 == nullid:
1185            fparent1, fparent2 = fparent2, nullid
1186        elif fparent2 != nullid:
1187            # is one parent an ancestor of the other?
1188            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1189            if fparent1 in fparentancestors:
1190                fparent1, fparent2 = fparent2, nullid
1191            elif fparent2 in fparentancestors:
1192                fparent2 = nullid
1193
1194        # is the file changed?
1195        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1196            changelist.append(fname)
1197            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1198
1199        # are just the flags changed during merge?
1200        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1201            changelist.append(fname)
1202
1203        return fparent1
1204
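For the foo -> bar rename in the diagram above, the copy information ends up as filelog metadata rather than a real parent. An illustrative sketch of that meta dict; the copyrev hash is made up:

meta = {
    'copy': 'foo',                      # path the file was copied from
    'copyrev': 'ab12cd34' + '0' * 32,   # hypothetical filenode of foo
}
# fparent1 is then set to nullid ("look up the copy data") and the other
# parent, if any, is carried in fparent2.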
1205    @unfilteredmethod
1206    def commit(self, text="", user=None, date=None, match=None, force=False,
1207               editor=False, extra={}):
1208        """Add a new revision to current repository.
1209
1210        Revision information is gathered from the working directory,
1211        match can be used to filter the committed files. If editor is
1212        supplied, it is called to get a commit message.
1213        """
1214
1215        def fail(f, msg):
1216            raise util.Abort('%s: %s' % (f, msg))
1217
1218        if not match:
1219            match = matchmod.always(self.root, '')
1220
1221        if not force:
1222            vdirs = []
1223            match.explicitdir = vdirs.append
1224            match.bad = fail
1225
1226        wlock = self.wlock()
1227        try:
1228            wctx = self[None]
1229            merge = len(wctx.parents()) > 1
1230
1231            if (not force and merge and match and
1232                (match.files() or match.anypats())):
1233                raise util.Abort(_('cannot partially commit a merge '
1234                                   '(do not specify files or patterns)'))
1235
1236            changes = self.status(match=match, clean=force)
1237            if force:
1238                changes[0].extend(changes[6]) # mq may commit unchanged files
1239
1240            # check subrepos
1241            subs = []
1242            commitsubs = set()
1243            newstate = wctx.substate.copy()
1244            # only manage subrepos and .hgsubstate if .hgsub is present
1245            if '.hgsub' in wctx:
1246                # we'll decide whether to track this ourselves, thanks
1247                for c in changes[:3]:
1248                    if '.hgsubstate' in c:
1249                        c.remove('.hgsubstate')
1250
1251                # compare current state to last committed state
1252                # build new substate based on last committed state
1253                oldstate = wctx.p1().substate
1254                for s in sorted(newstate.keys()):
1255                    if not match(s):
1256                        # ignore working copy, use old state if present
1257                        if s in oldstate:
1258                            newstate[s] = oldstate[s]
1259                            continue
1260                        if not force:
1261                            raise util.Abort(
1262                                _("commit with new subrepo %s excluded") % s)
1263                    if wctx.sub(s).dirty(True):
1264                        if not self.ui.configbool('ui', 'commitsubrepos'):
1265                            raise util.Abort(
1266                                _("uncommitted changes in subrepo %s") % s,
1267                                hint=_("use --subrepos for recursive commit"))
1268                        subs.append(s)
1269                        commitsubs.add(s)
1270                    else:
1271                        bs = wctx.sub(s).basestate()
1272                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
1273                        if oldstate.get(s, (None, None, None))[1] != bs:
1274                            subs.append(s)
1275
1276                # check for removed subrepos
1277                for p in wctx.parents():
1278                    r = [s for s in p.substate if s not in newstate]
1279                    subs += [s for s in r if match(s)]
1280                if subs:
1281                    if (not match('.hgsub') and
1282                        '.hgsub' in (wctx.modified() + wctx.added())):
1283                        raise util.Abort(
1284                            _("can't commit subrepos without .hgsub"))
1285                    changes[0].insert(0, '.hgsubstate')
1286
1287            elif '.hgsub' in changes[2]:
1288                # clean up .hgsubstate when .hgsub is removed
1289                if ('.hgsubstate' in wctx and
1290                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1291                    changes[2].insert(0, '.hgsubstate')
1292
1293            # make sure all explicit patterns are matched
1294            if not force and match.files():
1295                matched = set(changes[0] + changes[1] + changes[2])
1296
1297                for f in match.files():
1298                    f = self.dirstate.normalize(f)
1299                    if f == '.' or f in matched or f in wctx.substate:
1300                        continue
1301                    if f in changes[3]: # missing
1302                        fail(f, _('file not found!'))
1303                    if f in vdirs: # visited directory
1304                        d = f + '/'
1305                        for mf in matched:
1306                            if mf.startswith(d):
1307                                break
1308                        else:
1309                            fail(f, _("no match under directory!"))
1310                    elif f not in self.dirstate:
1311                        fail(f, _("file not tracked!"))
1312
1313            cctx = context.workingctx(self, text, user, date, extra, changes)
1314
1315            if (not force and not extra.get("close") and not merge
1316                and not cctx.files()
1317                and wctx.branch() == wctx.p1().branch()):
1318                return None
1319
1320            if merge and cctx.deleted():
1321                raise util.Abort(_("cannot commit merge with missing files"))
1322
1323            ms = mergemod.mergestate(self)
1324            for f in changes[0]:
1325                if f in ms and ms[f] == 'u':
1326                    raise util.Abort(_("unresolved merge conflicts "
1327                                       "(see hg help resolve)"))
1328
1329            if editor:
1330                cctx._text = editor(self, cctx, subs)
1331            edited = (text != cctx._text)
1332
1333            # Save commit message in case this transaction gets rolled back
1334            # (e.g. by a pretxncommit hook).  Leave the content alone on
1335            # the assumption that the user will use the same editor again.
1336            msgfn = self.savecommitmessage(cctx._text)
1337
1338            # commit subs and write new state
1339            if subs:
1340                for s in sorted(commitsubs):
1341                    sub = wctx.sub(s)
1342                    self.ui.status(_('committing subrepository %s\n') %
1343                                   subrepo.subrelpath(sub))
1344                    sr = sub.commit(cctx._text, user, date)
1345                    newstate[s] = (newstate[s][0], sr)
1346                subrepo.writestate(self, newstate)
1347
1348            p1, p2 = self.dirstate.parents()
1349            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1350            try:
1351                self.hook("precommit", throw=True, parent1=hookp1,
1352                          parent2=hookp2)
1353                ret = self.commitctx(cctx, True)
1354            except: # re-raises
1355                if edited:
1356                    self.ui.write(
1357                        _('note: commit message saved in %s\n') % msgfn)
1358                raise
1359
1360            # update bookmarks, dirstate and mergestate
1361            bookmarks.update(self, [p1, p2], ret)
1362            cctx.markcommitted(ret)
1363            ms.reset()
1364        finally:
1365            wlock.release()
1366
1367        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1368            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1369        self._afterlock(commithook)
1370        return ret
1371
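A minimal usage sketch of the method above, with repo and the message assumed; commit() returns None when there is nothing to commit:

node = repo.commit(text='fix stream error handling',
                   user='alice <alice@example.com>')
if node is None:
    print('nothing changed; no commit was created')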
1372    @unfilteredmethod
1373    def commitctx(self, ctx, error=False):
1374        """Add a new revision to current repository.
1375        Revision information is passed via the context argument.
1376        """
1377
1378        tr = lock = None
1379        removed = list(ctx.removed())
1380        p1, p2 = ctx.p1(), ctx.p2()
1381        user = ctx.user()
1382
1383        lock = self.lock()
1384        try:
1385            tr = self.transaction("commit")
1386            trp = weakref.proxy(tr)
1387
1388            if ctx.files():
1389                m1 = p1.manifest().copy()
1390                m2 = p2.manifest()
1391
1392                # check in files
1393                new = {}
1394                changed = []
1395                linkrev = len(self)
1396                for f in sorted(ctx.modified() + ctx.added()):
1397                    self.ui.note(f + "\n")
1398                    try:
1399                        fctx = ctx[f]
1400                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1401                                                  changed)
1402                        m1.set(f, fctx.flags())
1403                    except OSError, inst:
1404                        self.ui.warn(_("trouble committing %s!\n") % f)
1405                        raise
1406                    except IOError, inst:
1407                        errcode = getattr(inst, 'errno', errno.ENOENT)
1408                        if error or errcode and errcode != errno.ENOENT:
1409                            self.ui.warn(_("trouble committing %s!\n") % f)
1410                            raise
1411                        else:
1412                            removed.append(f)
1413
1414                # update manifest
1415                m1.update(new)
1416                removed = [f for f in sorted(removed) if f in m1 or f in m2]
1417                drop = [f for f in removed if f in m1]
1418                for f in drop:
1419                    del m1[f]
1420                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1421                                       p2.manifestnode(), (new, drop))
1422                files = changed + removed
1423            else:
1424                mn = p1.manifestnode()
1425                files = []
1426
1427            # update changelog
1428            self.changelog.delayupdate()
1429            n = self.changelog.add(mn, files, ctx.description(),
1430                                   trp, p1.node(), p2.node(),
1431                                   user, ctx.date(), ctx.extra().copy())
1432            p = lambda: self.changelog.writepending() and self.root or ""
1433            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1434            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1435                      parent2=xp2, pending=p)
1436            self.changelog.finalize(trp)
1437            # set the new commit in its proper phase
1438            targetphase = subrepo.newcommitphase(self.ui, ctx)
1439            if targetphase:
1440                # retracting the boundary does not alter the parent
1441                # changesets: if a parent has a higher phase, the
1442                # resulting phase will be compliant anyway
1443                #
1444                # if the minimal phase was 0 we don't need to retract anything
1445 -              phases.retractboundary(self, targetphase, [n])
1445 +              phases.retractboundary(self, tr, targetphase, [n])
1446            tr.close()
1447            branchmap.updatecache(self.filtered('served'))
1448            return n
1449        finally:
1450            if tr:
1451                tr.release()
1452            lock.release()
1453
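The replaced line above is the change this commit describes: retractboundary() now receives the active transaction, so the phase move is journaled together with the changelog write. A sketch of the resulting calling convention, with repo, targetphase and n as in the surrounding code:

tr = repo.transaction('commit')
try:
    # passing tr lets the phase retraction be written (and rolled back)
    # together with the changelog entry it belongs to
    phases.retractboundary(repo, tr, targetphase, [n])
    tr.close()
finally:
    tr.release()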
1454    @unfilteredmethod
1455    def destroying(self):
1456        '''Inform the repository that nodes are about to be destroyed.
1457        Intended for use by strip and rollback, so there's a common
1458        place for anything that has to be done before destroying history.
1459
1460        This is mostly useful for saving state that is in memory and waiting
1461        to be flushed when the current lock is released. Because a call to
1462        destroyed is imminent, the repo will be invalidated, causing those
1463        changes to stay in memory (waiting for the next unlock), or vanish
1464        completely.
1465        '''
1466        # When using the same lock to commit and strip, the phasecache is left
1467        # dirty after committing. Then when we strip, the repo is invalidated,
1468        # causing those changes to disappear.
1469        if '_phasecache' in vars(self):
1470            self._phasecache.write()
1471
1472    @unfilteredmethod
1473    def destroyed(self):
1474        '''Inform the repository that nodes have been destroyed.
1475        Intended for use by strip and rollback, so there's a common
1476        place for anything that has to be done after destroying history.
1477        '''
1478        # When one tries to:
1479        # 1) destroy nodes thus calling this method (e.g. strip)
1480        # 2) use phasecache somewhere (e.g. commit)
1481        #
1482        # then 2) will fail because the phasecache contains nodes that were
1483        # removed. We can either remove phasecache from the filecache,
1484        # causing it to reload next time it is accessed, or simply filter
1485        # the removed nodes now and write the updated cache.
1486        self._phasecache.filterunknown(self)
1487        self._phasecache.write()
1488
1489        # update the 'served' branch cache to help read-only server processes
1490        # Thanks to branchcache collaboration this is done from the nearest
1491        # filtered subset and it is expected to be fast.
1492        branchmap.updatecache(self.filtered('served'))
1493
1494        # Ensure the persistent tag cache is updated.  Doing it now
1495        # means that the tag cache only has to worry about destroyed
1496        # heads immediately after a strip/rollback.  That in turn
1497        # guarantees that "cachetip == currenttip" (comparing both rev
1498        # and node) always means no nodes have been added or destroyed.
1499
1500        # XXX this is suboptimal when qrefresh'ing: we strip the current
1501        # head, refresh the tag cache, then immediately add a new head.
1502        # But I think doing it this way is necessary for the "instant
1503        # tag cache retrieval" case to work.
1504        self.invalidate()
1505
1506    def walk(self, match, node=None):
1507        '''
1508        walk recursively through the directory tree or a given
1509        changeset, finding all files matched by the match
1510        function
1511        '''
1512        return self[node].walk(match)
1513
1514    def status(self, node1='.', node2=None, match=None,
1515               ignored=False, clean=False, unknown=False,
1516               listsubrepos=False):
1517        '''a convenience method that calls node1.status(node2)'''
1518        return self[node1].status(node2, match, ignored, clean, unknown,
1519                                  listsubrepos)
1520
1521    def heads(self, start=None):
1522        heads = self.changelog.heads(start)
1523        # sort the output in rev descending order
1524        return sorted(heads, key=self.changelog.rev, reverse=True)
1525
1526    def branchheads(self, branch=None, start=None, closed=False):
1527        '''return a (possibly filtered) list of heads for the given branch
1528
1529        Heads are returned in topological order, from newest to oldest.
1530        If branch is None, use the dirstate branch.
1531        If start is not None, return only heads reachable from start.
1532        If closed is True, return heads that are marked as closed as well.
1533        '''
1534        if branch is None:
1535            branch = self[None].branch()
1536        branches = self.branchmap()
1537        if branch not in branches:
1538            return []
1539        # the cache returns heads ordered lowest to highest
1540        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1541        if start is not None:
1542            # filter out the heads that cannot be reached from startrev
1543            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1544            bheads = [h for h in bheads if h in fbheads]
1545        return bheads
1546
1547    def branches(self, nodes):
1548        if not nodes:
1549            nodes = [self.changelog.tip()]
1550        b = []
1551        for n in nodes:
1552            t = n
1553            while True:
1554                p = self.changelog.parents(n)
1555                if p[1] != nullid or p[0] == nullid:
1556                    b.append((t, n, p[0], p[1]))
1557                    break
1558                n = p[0]
1559        return b
1560
1561    def between(self, pairs):
1562        r = []
1563
1564        for top, bottom in pairs:
1565            n, l, i = top, [], 0
1566            f = 1
1567
1568            while n != bottom and n != nullid:
1569                p = self.changelog.parents(n)[0]
1570                if i == f:
1571                    l.append(n)
1572                    f = f * 2
1573                n = p
1574                i += 1
1575
1576            r.append(l)
1577
1578        return r
1579
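between() keeps every ancestor whose distance from top is a power of two (1, 2, 4, 8, ...), so the result stays logarithmic in the distance to bottom. A standalone sketch of the same sampling rule over a plain list:

def sample(ancestors):
    # keep positions 1, 2, 4, 8, ... of the walk, exactly like the
    # 'if i == f: ... f = f * 2' loop above
    picked, f = [], 1
    for i, node in enumerate(ancestors):
        if i == f:
            picked.append(node)
            f *= 2
    return picked

print(sample(range(20)))   # [1, 2, 4, 8, 16]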
1580    def pull(self, remote, heads=None, force=False):
1581        return exchange.pull(self, remote, heads, force)
1582
1583    def checkpush(self, pushop):
1584        """Extensions can override this function if additional checks have
1585        to be performed before pushing, or call it if they override the push
1586        command.
1587        """
1588        pass
1589
1590    @unfilteredpropertycache
1591    def prepushoutgoinghooks(self):
1592        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
1593        functions, which are called before pushing changesets.
1594        """
1595        return util.hooks()
1596
1597    def push(self, remote, force=False, revs=None, newbranch=False):
1598        return exchange.push(self, remote, force, revs, newbranch)
1599
1600    def stream_in(self, remote, requirements):
1601        lock = self.lock()
1602        try:
1603            # Save remote branchmap. We will use it later
1604            # to speed up branchcache creation
1605            rbranchmap = None
1606            if remote.capable("branchmap"):
1607                rbranchmap = remote.branchmap()
1608
1609            fp = remote.stream_out()
1610            l = fp.readline()
1611            try:
1612                resp = int(l)
1613            except ValueError:
1614                raise error.ResponseError(
1615                    _('unexpected response from remote server:'), l)
1616            if resp == 1:
1617                raise util.Abort(_('operation forbidden by server'))
1618            elif resp == 2:
1619                raise util.Abort(_('locking the remote repository failed'))
1620            elif resp != 0:
1621                raise util.Abort(_('the server sent an unknown error code'))
1622            self.ui.status(_('streaming all changes\n'))
1623            l = fp.readline()
1624            try:
1625                total_files, total_bytes = map(int, l.split(' ', 1))
1626            except (ValueError, TypeError):
1627                raise error.ResponseError(
1628                    _('unexpected response from remote server:'), l)
1629            self.ui.status(_('%d files to transfer, %s of data\n') %
1630                           (total_files, util.bytecount(total_bytes)))
1631            handled_bytes = 0
1632            self.ui.progress(_('clone'), 0, total=total_bytes)
1633            start = time.time()
1634
1635            tr = self.transaction(_('clone'))
1636            try:
1637                for i in xrange(total_files):
1638                    # XXX doesn't support '\n' or '\r' in filenames
1639                    l = fp.readline()
1640                    try:
1641                        name, size = l.split('\0', 1)
1642                        size = int(size)
1643                    except (ValueError, TypeError):
1644                        raise error.ResponseError(
1645                            _('unexpected response from remote server:'), l)
1646                    if self.ui.debugflag:
1647                        self.ui.debug('adding %s (%s)\n' %
1648                                      (name, util.bytecount(size)))
1649                    # for backwards compat, name was partially encoded
1650                    ofp = self.sopener(store.decodedir(name), 'w')
1651                    for chunk in util.filechunkiter(fp, limit=size):
1652                        handled_bytes += len(chunk)
1653                        self.ui.progress(_('clone'), handled_bytes,
1654                                         total=total_bytes)
1655                        ofp.write(chunk)
1656                    ofp.close()
1657                tr.close()
1658            finally:
1659                tr.release()
1660
1661            # Writing straight to files circumvented the in-memory caches
1662            self.invalidate()
1663
1664            elapsed = time.time() - start
1665            if elapsed <= 0:
1666                elapsed = 0.001
1667            self.ui.progress(_('clone'), None)
1668            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1669                           (util.bytecount(total_bytes), elapsed,
1670                            util.bytecount(total_bytes / elapsed)))
1671
1672            # new requirements = old non-format requirements +
1673            #                    new format-related
1674            #                    requirements from the streamed-in repository
1675            requirements.update(set(self.requirements) - self.supportedformats)
1676            self._applyrequirements(requirements)
1677            self._writerequirements()
1678
1679            if rbranchmap:
1680                rbheads = []
1681                for bheads in rbranchmap.itervalues():
1682                    rbheads.extend(bheads)
1683
1684                if rbheads:
1685                    rtiprev = max((int(self.changelog.rev(node))
1686                                   for node in rbheads))
1687                    cache = branchmap.branchcache(rbranchmap,
1688                                                  self[rtiprev].node(),
1689                                                  rtiprev)
1690                    # Try to stick it as low as possible
1691                    # filters above 'served' are unlikely to be fetched from a clone
1692                    for candidate in ('base', 'immutable', 'served'):
1693                        rview = self.filtered(candidate)
1694                        if cache.validfor(rview):
1695                            self._branchcaches[candidate] = cache
1696                            cache.write(rview)
1697                            break
1698            self.invalidate()
1699            return len(self.heads()) + 1
1700        finally:
1701            lock.release()
1702
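The stream parsed above is: a status line (0 ok, 1 forbidden, 2 remote locking failed), a '<files> <bytes>' line, then per file a 'name\0size' header followed by exactly size raw bytes. A simplified reader under those assumptions; the real code reads in chunks via util.filechunkiter:

def read_stream(fp):
    # status line: 0 = ok, 1 = forbidden, 2 = remote locking failed
    if int(fp.readline()) != 0:
        raise IOError('server refused to stream')
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in range(total_files):
        # per-file header is 'name\0size', then exactly size raw bytes;
        # total_bytes is informational only
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))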
1703    def clone(self, remote, heads=[], stream=False):
1704        '''clone remote repository.
1705
1706        keyword arguments:
1707        heads: list of revs to clone (forces use of pull)
1708        stream: use streaming clone if possible'''
1709
1710        # now, all clients that can request uncompressed clones can
1711        # read repo formats supported by all servers that can serve
1712        # them.
1713
1714        # if revlog format changes, client will have to check version
1715        # and format flags on "stream" capability, and use
1716        # uncompressed only if compatible.
1717
1718        if not stream:
1719            # if the server explicitly prefers to stream (for fast LANs)
1720            stream = remote.capable('stream-preferred')
1721
1722        if stream and not heads:
1723            # 'stream' means remote revlog format is revlogv1 only
1724            if remote.capable('stream'):
1725                return self.stream_in(remote, set(('revlogv1',)))
1726            # otherwise, 'streamreqs' contains the remote revlog format
1727            streamreqs = remote.capable('streamreqs')
1728            if streamreqs:
1729                streamreqs = set(streamreqs.split(','))
1730                # if we support it, stream in and adjust our requirements
1731                if not streamreqs - self.supportedformats:
1732                    return self.stream_in(remote, streamreqs)
1733        return self.pull(remote, heads)
1734
1735    def pushkey(self, namespace, key, old, new):
1736        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1737                  old=old, new=new)
1738        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1739        ret = pushkey.push(self, namespace, key, old, new)
1740        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1741                  ret=ret)
1742        return ret
1743
1744    def listkeys(self, namespace):
1745        self.hook('prelistkeys', throw=True, namespace=namespace)
1746        self.ui.debug('listing keys for "%s"\n' % namespace)
1747        values = pushkey.list(self, namespace)
1748        self.hook('listkeys', namespace=namespace, values=values)
1749        return values
1750
1751    def debugwireargs(self, one, two, three=None, four=None, five=None):
1752        '''used to test argument passing over the wire'''
1753        return "%s %s %s %s %s" % (one, two, three, four, five)
1754
1755    def savecommitmessage(self, text):
1756        fp = self.opener('last-message.txt', 'wb')
1757        try:
1758            fp.write(text)
1759        finally:
1760            fp.close()
1761        return self.pathto(fp.name[len(self.root) + 1:])
1762
1763 # used to avoid circular references so destructors work
1764 def aftertrans(files):
1765     renamefiles = [tuple(t) for t in files]
1766     def a():
1767         for vfs, src, dest in renamefiles:
1768             try:
1769                 vfs.rename(src, dest)
1770             except OSError: # journal file does not yet exist
1771                 pass
1772     return a
1773
1774 def undoname(fn):
1775     base, name = os.path.split(fn)
1776     assert name.startswith('journal')
1777     return os.path.join(base, name.replace('journal', 'undo', 1))
1778
1779 def instance(ui, path, create):
1780     return localrepository(ui, util.urllocalpath(path), create)
1781
1782 def islocal(path):
1783     return True
@@ -1,415 +1,415 @@
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase are described
22 below; here we describe the properties they have in common.
23
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
26
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset cannot be in a lower phase than its parents.
31
32 These phases share a hierarchy of traits:
33
34             immutable shared
35    public:      X        X
36    draft:                X
37    secret:
38
39 Local commits are draft by default.
40
41 Phase Movement and Exchange
42 ===========================
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set; we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
48
49 A short list of facts/rules defines the exchange of phases:
50
51 * an old client never changes server states
52 * pull never changes server states
53 * changesets from publishing and old servers are seen as public by the client
54 * any secret changeset seen in another repository is lowered to at
55   least draft
56
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
59
60                           server
61               old        publish        non-publish
62              N    X     N    D    P    N    D    P
63  old client
64  pull
65   N          -    X/X   -    X/D  X/P  -    X/D  X/P
66   X          -    X/X   -    X/D  X/P  -    X/D  X/P
67  push
68   X          X/X  X/X   X/P  X/P  X/P  X/D  X/D  X/P
69  new client
70  pull
71   N          -    P/X   -    P/D  P/P  -    D/D  P/P
72   D          -    P/X   -    P/D  P/P  -    D/D  P/P
73   P          -    P/X   -    P/D  P/P  -    P/D  P/P
74  push
75   D          P/X  P/X   P/P  P/P  P/P  D/D  D/D  P/P
76   P          P/X  P/X   P/P  P/P  P/P  P/P  P/P  P/P
77
78 Legend:
79
80   A/B = final state on client / state on server
81
82   * N = new/not present,
83   * P = public,
84   * D = draft,
85   * X = not tracked (i.e., the old client or server has no internal
86         way of recording the phase.)
87
88   passive = only pushes
89
90
91   A cell here can be read like this:
92
93   "When a new client pushes a draft changeset (D) to a publishing
94   server where it's not present (N), it's marked public on both
95   sides (P/P)."
96
97 Note: old clients behave as publishing servers with draft-only content
98 - other people see it as public
99 - content is pushed as draft
100
101 """
102
103 import errno
104 from node import nullid, nullrev, bin, hex, short
105 from i18n import _
106 import util, error
107
108 allphases = public, draft, secret = range(3)
109 trackedphases = allphases[1:]
110 phasenames = ['public', 'draft', 'secret']
111
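The unpacking one-liner above makes public, draft and secret plain integers (0, 1, 2) while also collecting them in allphases, and phasenames maps them back to strings. A quick illustration:

public, draft, secret = 0, 1, 2          # as produced by range(3) above
phasenames = ['public', 'draft', 'secret']
assert phasenames[draft] == 'draft'
assert public < draft < secret           # the ordering used for boundaries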
112 def _readroots(repo, phasedefaults=None):
113     """Read phase roots from disk
114
115     phasedefaults is a list of fn(repo, roots) callables, which are
116     executed if the phase roots file does not exist. When phases are
117     being initialized on an existing repository, this could be used to
118     set selected changesets' phase to something other than public.
119
120     Return (roots, dirty) where dirty is true if roots differ from
121     what is being stored.
122     """
123 repo = repo.unfiltered()
123 repo = repo.unfiltered()
124 dirty = False
124 dirty = False
125 roots = [set() for i in allphases]
125 roots = [set() for i in allphases]
126 try:
126 try:
127 f = repo.sopener('phaseroots')
127 f = repo.sopener('phaseroots')
128 try:
128 try:
129 for line in f:
129 for line in f:
130 phase, nh = line.split()
130 phase, nh = line.split()
131 roots[int(phase)].add(bin(nh))
131 roots[int(phase)].add(bin(nh))
132 finally:
132 finally:
133 f.close()
133 f.close()
134 except IOError, inst:
134 except IOError, inst:
135 if inst.errno != errno.ENOENT:
135 if inst.errno != errno.ENOENT:
136 raise
136 raise
137 if phasedefaults:
137 if phasedefaults:
138 for f in phasedefaults:
138 for f in phasedefaults:
139 roots = f(repo, roots)
139 roots = f(repo, roots)
140 dirty = True
140 dirty = True
141 return roots, dirty
141 return roots, dirty
142
142
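For illustration, here is a sketch of a phasedefaults callable of the kind
_readroots() accepts; the function name is invented, and it assumes it runs
only when no phaseroots file exists yet:

    def _alldraft(repo, roots):
        # Hypothetical default: when phases are first initialized, root
        # the draft phase at every root of the repository, so all
        # existing changesets become draft rather than public.
        for ctx in repo.set('roots(all())'):
            roots[draft].add(ctx.node())
        return roots
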
class phasecache(object):
    def __init__(self, repo, phasedefaults, _load=True):
        if _load:
            # Cheap trick to allow shallow-copy without copy module
            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
            self._phaserevs = None
            self.filterunknown(repo)
            self.opener = repo.sopener

    def copy(self):
        # Shallow copy meant to ensure isolation in
        # advance/retractboundary(), nothing more.
        ph = phasecache(None, None, _load=False)
        ph.phaseroots = self.phaseroots[:]
        ph.dirty = self.dirty
        ph.opener = self.opener
        ph._phaserevs = self._phaserevs
        return ph

    def replace(self, phcache):
        for a in 'phaseroots dirty opener _phaserevs'.split():
            setattr(self, a, getattr(phcache, a))

    def getphaserevs(self, repo, rebuild=False):
        if rebuild or self._phaserevs is None:
            repo = repo.unfiltered()
            revs = [public] * len(repo.changelog)
            for phase in trackedphases:
                roots = map(repo.changelog.rev, self.phaseroots[phase])
                if roots:
                    for rev in roots:
                        revs[rev] = phase
                    for rev in repo.changelog.descendants(roots):
                        revs[rev] = phase
            self._phaserevs = revs
        return self._phaserevs

    def phase(self, repo, rev):
        # We need a repo argument here to be able to build _phaserevs
        # if necessary. The repository instance is not stored in
        # phasecache to avoid reference cycles. The changelog instance
        # is not stored because it is a filecache() property and can
        # be replaced without us being notified.
        if rev == nullrev:
            return public
        if rev < nullrev:
            raise ValueError(_('cannot lookup negative revision'))
        if self._phaserevs is None or rev >= len(self._phaserevs):
            self._phaserevs = self.getphaserevs(repo, rebuild=True)
        return self._phaserevs[rev]

    def write(self):
        if not self.dirty:
            return
        f = self.opener('phaseroots', 'w', atomictemp=True)
        try:
            for phase, roots in enumerate(self.phaseroots):
                for h in roots:
                    f.write('%i %s\n' % (phase, hex(h)))
        finally:
            f.close()
        self.dirty = False

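The on-disk format written above is one "<phase-index> <hex-node>" pair per
line. A minimal sketch (with a fabricated node id) of parsing one such line,
mirroring the loop in _readroots():

    line = '1 ' + 'ab' * 20 + '\n'   # one draft root, node id fabricated
    phase, nh = line.split()
    assert int(phase) == draft and len(nh) == 40
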
    def _updateroots(self, phase, newroots):
        self.phaseroots[phase] = newroots
        self._phaserevs = None
        self.dirty = True

    def advanceboundary(self, repo, tr, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        repo = repo.unfiltered()
        delroots = [] # set of roots deleted by this path
        for phase in xrange(targetphase + 1, len(allphases)):
            # filter nodes that are not in a compatible phase already
            nodes = [n for n in nodes
                     if self.phase(repo, repo[n].rev()) >= phase]
            if not nodes:
                break # no roots to move anymore
            olds = self.phaseroots[phase]
            roots = set(ctx.node() for ctx in repo.set(
                    'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
            if olds != roots:
                self._updateroots(phase, roots)
                # some roots may need to be declared for lower phases
                delroots.extend(olds - roots)
        # declare deleted roots in the target phase
        if targetphase != 0:
-            self.retractboundary(repo, targetphase, delroots)
+            self.retractboundary(repo, tr, targetphase, delroots)
        repo.invalidatevolatilesets()

-    def retractboundary(self, repo, targetphase, nodes):
+    def retractboundary(self, repo, tr, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        repo = repo.unfiltered()
        currentroots = self.phaseroots[targetphase]
        newroots = [n for n in nodes
                    if self.phase(repo, repo[n].rev()) < targetphase]
        if newroots:
            if nullid in newroots:
                raise util.Abort(_('cannot change null revision phase'))
            currentroots = currentroots.copy()
            currentroots.update(newroots)
            ctxs = repo.set('roots(%ln::)', currentroots)
            currentroots.intersection_update(ctx.node() for ctx in ctxs)
            self._updateroots(targetphase, currentroots)
        repo.invalidatevolatilesets()

    def filterunknown(self, repo):
        """remove unknown nodes from the phase boundary

        Nothing is lost as unknown nodes only hold data for their descendants.
        """
        filtered = False
        nodemap = repo.changelog.nodemap # to filter unknown nodes
        for phase, nodes in enumerate(self.phaseroots):
            missing = sorted(node for node in nodes if node not in nodemap)
            if missing:
                for mnode in missing:
                    repo.ui.debug(
                        'removing unknown node %s from %i-phase boundary\n'
                        % (short(mnode), phase))
                nodes.symmetric_difference_update(missing)
                filtered = True
        if filtered:
            self.dirty = True
        # filterunknown is called by repo.destroyed: we may have no change
        # in the roots, but the phaserevs contents are certainly invalid
        # (or at least we have no proper way to check that). Related to
        # issue 3858.
        #
        # The other caller is __init__, which has no _phaserevs initialized
        # anyway. If this changes, we should consider adding a dedicated
        # "destroyed" function to phasecache, or a proper cache key mechanism
        # (see the branchmap one).
        self._phaserevs = None

def advanceboundary(repo, tr, targetphase, nodes):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *forward*: all nodes are set to
    the target phase or kept in a *lower* phase.

    The boundary is simplified to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.advanceboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)

-def retractboundary(repo, targetphase, nodes):
+def retractboundary(repo, tr, targetphase, nodes):
    """Set nodes back to a phase, changing other nodes' phases if
    necessary.

    This function moves the boundary *backward*: all nodes are set to
    the target phase or kept in a *higher* phase.

    The boundary is simplified to contain phase roots only."""
    phcache = repo._phasecache.copy()
-    phcache.retractboundary(repo, targetphase, nodes)
+    phcache.retractboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)

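Both helpers above are meant to run inside a transaction and under the
repository lock, as pushphase() below does. A minimal sketch of a caller
(the function name and transaction description are invented):

    def _forcedraft(repo, nodes):
        # Hypothetical helper: retract the draft boundary to 'nodes',
        # following the lock/transaction pattern used by pushphase().
        lock = repo.lock()
        try:
            tr = repo.transaction('phase-sketch')
            try:
                retractboundary(repo, tr, draft, nodes)
                tr.close()
            finally:
                tr.release()
        finally:
            lock.release()
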
def listphases(repo):
    """List phase roots for serialization over pushkey"""
    keys = {}
    value = '%i' % draft
    for root in repo._phasecache.phaseroots[draft]:
        keys[hex(root)] = value

    if repo.ui.configbool('phases', 'publish', True):
        # Add some extra data to let the remote know we are a publishing
        # repo. A publishing repo can't just pretend it is an old repo:
        # when pushing to a publishing repo, the client still needs to
        # push the phase boundary.
        #
        # A push does not only push changesets; it also pushes phase
        # data. New phase data may apply to common changesets which
        # won't be pushed (as they are common). Here is a very simple
        # example:
        #
        # 1) repo A pushes changeset X as draft to repo B
        # 2) repo B makes changeset X public
        # 3) repo B pushes to repo A. X is not pushed, but the data that
        #    X is now public should be.
        #
        # The server can't handle this on its own as it has no idea of
        # the client's phase data.
        keys['publishing'] = 'True'
    return keys

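For illustration, a publishing server with a single draft root would answer
the "phases" pushkey namespace with a mapping of this shape (a sketch; the
node id is fabricated):

    keys = {
        'ab' * 20: '1',        # one draft root, phase index as a string
        'publishing': 'True',  # marker added by the configbool branch above
    }
    assert keys['publishing'] == 'True'
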
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """Change the phase of node nhex from oldphasestr to newphasestr
    over pushkey; return 1 on success, 0 otherwise"""
    repo = repo.unfiltered()
    tr = None
    lock = repo.lock()
    try:
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr)) # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            tr = repo.transaction('pushkey-phase')
            advanceboundary(repo, tr, newphase, [bin(nhex)])
            tr.close()
            return 1
        elif currentphase == newphase:
            # raced, but got correct result
            return 1
        else:
            return 0
    finally:
        if tr:
            tr.release()
        lock.release()

def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots in a subset of nodes from a roots dict

    * subset is the heads of the subset
    * roots is a {<nodeid> => phase} mapping; keys and values are strings.

    Accepts unknown elements as input.
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == 0:
            if node != nullid:
                repo.ui.warn(_('ignoring inconsistent public root'
                               ' from remote: %s\n') % nhex)
        elif phase == 1:
            if node in nodemap:
                draftroots.append(node)
        else:
            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
                         % (phase, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots

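A minimal sketch (not part of phases.py) of the roots mapping this function
parses; the node id is fabricated, and the filtering mirrors the loop above:

    roots = {
        'publishing': 'True',  # marker from listphases(), skipped above
        'ab' * 20: '1',        # one draft root (phase index 1, as a string)
    }
    drafthexes = [nhex for nhex, phase in roots.iteritems()
                  if nhex != 'publishing' and int(phase) == 1]
    assert drafthexes == ['ab' * 20]
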
def newheads(repo, heads, roots):
    """compute the new heads of a subset minus another

    * `heads`: defines the first subset
    * `roots`: defines the second, which we subtract from the first"""
    repo = repo.unfiltered()
    revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                      heads, roots, roots, heads)
    return [c.node() for c in revset]


def newcommitphase(ui):
    """helper to get the target phase of a new commit

    Handles all possible values of the phases.new-commit option.

    """
    v = ui.config('phases', 'new-commit', draft)
    try:
        return phasenames.index(v)
    except ValueError:
        try:
            return int(v)
        except ValueError:
            msg = _("phases.new-commit: not a valid phase name ('%s')")
            raise error.ConfigError(msg % v)

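As the two nested try blocks above show, phases.new-commit accepts either a
phase name or an integer index. A sketch of the equivalent hgrc settings
(illustration only):

    # [phases]
    # new-commit = secret    # by name ...
    # new-commit = 2         # ... or by index; both map to the same phase
    assert phasenames.index('secret') == secret == 2
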
def hassecret(repo):
    """utility function that checks whether a repo has any secret changesets."""
    return bool(repo._phasecache.phaseroots[2])