##// END OF EJS Templates
filelog: switch 'not len(filerevlog)' to 'not filerevlog'...
Durham Goode -
r19293:446ab88d default
parent child Browse files
Show More
@@ -1,415 +1,415 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import nullrev, hex
9 from node import nullrev, hex
10 import mdiff, util, dagutil
10 import mdiff, util, dagutil
11 import struct, os, bz2, zlib, tempfile
11 import struct, os, bz2, zlib, tempfile
12
12
13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
14
14
15 def readexactly(stream, n):
15 def readexactly(stream, n):
16 '''read n bytes from stream.read and abort if less was available'''
16 '''read n bytes from stream.read and abort if less was available'''
17 s = stream.read(n)
17 s = stream.read(n)
18 if len(s) < n:
18 if len(s) < n:
19 raise util.Abort(_("stream ended unexpectedly"
19 raise util.Abort(_("stream ended unexpectedly"
20 " (got %d bytes, expected %d)")
20 " (got %d bytes, expected %d)")
21 % (len(s), n))
21 % (len(s), n))
22 return s
22 return s
23
23
24 def getchunk(stream):
24 def getchunk(stream):
25 """return the next chunk from stream as a string"""
25 """return the next chunk from stream as a string"""
26 d = readexactly(stream, 4)
26 d = readexactly(stream, 4)
27 l = struct.unpack(">l", d)[0]
27 l = struct.unpack(">l", d)[0]
28 if l <= 4:
28 if l <= 4:
29 if l:
29 if l:
30 raise util.Abort(_("invalid chunk length %d") % l)
30 raise util.Abort(_("invalid chunk length %d") % l)
31 return ""
31 return ""
32 return readexactly(stream, l - 4)
32 return readexactly(stream, l - 4)
33
33
34 def chunkheader(length):
34 def chunkheader(length):
35 """return a changegroup chunk header (string)"""
35 """return a changegroup chunk header (string)"""
36 return struct.pack(">l", length + 4)
36 return struct.pack(">l", length + 4)
37
37
38 def closechunk():
38 def closechunk():
39 """return a changegroup chunk header (string) for a zero-length chunk"""
39 """return a changegroup chunk header (string) for a zero-length chunk"""
40 return struct.pack(">l", 0)
40 return struct.pack(">l", 0)
41
41
42 class nocompress(object):
42 class nocompress(object):
43 def compress(self, x):
43 def compress(self, x):
44 return x
44 return x
45 def flush(self):
45 def flush(self):
46 return ""
46 return ""
47
47
48 bundletypes = {
48 bundletypes = {
49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
50 # since the unification ssh accepts a header but there
50 # since the unification ssh accepts a header but there
51 # is no capability signaling it.
51 # is no capability signaling it.
52 "HG10UN": ("HG10UN", nocompress),
52 "HG10UN": ("HG10UN", nocompress),
53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
55 }
55 }
56
56
57 # hgweb uses this list to communicate its preferred type
57 # hgweb uses this list to communicate its preferred type
58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
59
59
60 def writebundle(cg, filename, bundletype):
60 def writebundle(cg, filename, bundletype):
61 """Write a bundle file and return its filename.
61 """Write a bundle file and return its filename.
62
62
63 Existing files will not be overwritten.
63 Existing files will not be overwritten.
64 If no filename is specified, a temporary file is created.
64 If no filename is specified, a temporary file is created.
65 bz2 compression can be turned off.
65 bz2 compression can be turned off.
66 The bundle file will be deleted in case of errors.
66 The bundle file will be deleted in case of errors.
67 """
67 """
68
68
69 fh = None
69 fh = None
70 cleanup = None
70 cleanup = None
71 try:
71 try:
72 if filename:
72 if filename:
73 fh = open(filename, "wb")
73 fh = open(filename, "wb")
74 else:
74 else:
75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
76 fh = os.fdopen(fd, "wb")
76 fh = os.fdopen(fd, "wb")
77 cleanup = filename
77 cleanup = filename
78
78
79 header, compressor = bundletypes[bundletype]
79 header, compressor = bundletypes[bundletype]
80 fh.write(header)
80 fh.write(header)
81 z = compressor()
81 z = compressor()
82
82
83 # parse the changegroup data, otherwise we will block
83 # parse the changegroup data, otherwise we will block
84 # in case of sshrepo because we don't know the end of the stream
84 # in case of sshrepo because we don't know the end of the stream
85
85
86 # an empty chunkgroup is the end of the changegroup
86 # an empty chunkgroup is the end of the changegroup
87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
88 # after that, an empty chunkgroup is the end of the changegroup
88 # after that, an empty chunkgroup is the end of the changegroup
89 empty = False
89 empty = False
90 count = 0
90 count = 0
91 while not empty or count <= 2:
91 while not empty or count <= 2:
92 empty = True
92 empty = True
93 count += 1
93 count += 1
94 while True:
94 while True:
95 chunk = getchunk(cg)
95 chunk = getchunk(cg)
96 if not chunk:
96 if not chunk:
97 break
97 break
98 empty = False
98 empty = False
99 fh.write(z.compress(chunkheader(len(chunk))))
99 fh.write(z.compress(chunkheader(len(chunk))))
100 pos = 0
100 pos = 0
101 while pos < len(chunk):
101 while pos < len(chunk):
102 next = pos + 2**20
102 next = pos + 2**20
103 fh.write(z.compress(chunk[pos:next]))
103 fh.write(z.compress(chunk[pos:next]))
104 pos = next
104 pos = next
105 fh.write(z.compress(closechunk()))
105 fh.write(z.compress(closechunk()))
106 fh.write(z.flush())
106 fh.write(z.flush())
107 cleanup = None
107 cleanup = None
108 return filename
108 return filename
109 finally:
109 finally:
110 if fh is not None:
110 if fh is not None:
111 fh.close()
111 fh.close()
112 if cleanup is not None:
112 if cleanup is not None:
113 os.unlink(cleanup)
113 os.unlink(cleanup)
114
114
115 def decompressor(fh, alg):
115 def decompressor(fh, alg):
116 if alg == 'UN':
116 if alg == 'UN':
117 return fh
117 return fh
118 elif alg == 'GZ':
118 elif alg == 'GZ':
119 def generator(f):
119 def generator(f):
120 zd = zlib.decompressobj()
120 zd = zlib.decompressobj()
121 for chunk in util.filechunkiter(f):
121 for chunk in util.filechunkiter(f):
122 yield zd.decompress(chunk)
122 yield zd.decompress(chunk)
123 elif alg == 'BZ':
123 elif alg == 'BZ':
124 def generator(f):
124 def generator(f):
125 zd = bz2.BZ2Decompressor()
125 zd = bz2.BZ2Decompressor()
126 zd.decompress("BZ")
126 zd.decompress("BZ")
127 for chunk in util.filechunkiter(f, 4096):
127 for chunk in util.filechunkiter(f, 4096):
128 yield zd.decompress(chunk)
128 yield zd.decompress(chunk)
129 else:
129 else:
130 raise util.Abort("unknown bundle compression '%s'" % alg)
130 raise util.Abort("unknown bundle compression '%s'" % alg)
131 return util.chunkbuffer(generator(fh))
131 return util.chunkbuffer(generator(fh))
132
132
133 class unbundle10(object):
133 class unbundle10(object):
134 deltaheader = _BUNDLE10_DELTA_HEADER
134 deltaheader = _BUNDLE10_DELTA_HEADER
135 deltaheadersize = struct.calcsize(deltaheader)
135 deltaheadersize = struct.calcsize(deltaheader)
136 def __init__(self, fh, alg):
136 def __init__(self, fh, alg):
137 self._stream = decompressor(fh, alg)
137 self._stream = decompressor(fh, alg)
138 self._type = alg
138 self._type = alg
139 self.callback = None
139 self.callback = None
140 def compressed(self):
140 def compressed(self):
141 return self._type != 'UN'
141 return self._type != 'UN'
142 def read(self, l):
142 def read(self, l):
143 return self._stream.read(l)
143 return self._stream.read(l)
144 def seek(self, pos):
144 def seek(self, pos):
145 return self._stream.seek(pos)
145 return self._stream.seek(pos)
146 def tell(self):
146 def tell(self):
147 return self._stream.tell()
147 return self._stream.tell()
148 def close(self):
148 def close(self):
149 return self._stream.close()
149 return self._stream.close()
150
150
151 def chunklength(self):
151 def chunklength(self):
152 d = readexactly(self._stream, 4)
152 d = readexactly(self._stream, 4)
153 l = struct.unpack(">l", d)[0]
153 l = struct.unpack(">l", d)[0]
154 if l <= 4:
154 if l <= 4:
155 if l:
155 if l:
156 raise util.Abort(_("invalid chunk length %d") % l)
156 raise util.Abort(_("invalid chunk length %d") % l)
157 return 0
157 return 0
158 if self.callback:
158 if self.callback:
159 self.callback()
159 self.callback()
160 return l - 4
160 return l - 4
161
161
162 def changelogheader(self):
162 def changelogheader(self):
163 """v10 does not have a changelog header chunk"""
163 """v10 does not have a changelog header chunk"""
164 return {}
164 return {}
165
165
166 def manifestheader(self):
166 def manifestheader(self):
167 """v10 does not have a manifest header chunk"""
167 """v10 does not have a manifest header chunk"""
168 return {}
168 return {}
169
169
170 def filelogheader(self):
170 def filelogheader(self):
171 """return the header of the filelogs chunk, v10 only has the filename"""
171 """return the header of the filelogs chunk, v10 only has the filename"""
172 l = self.chunklength()
172 l = self.chunklength()
173 if not l:
173 if not l:
174 return {}
174 return {}
175 fname = readexactly(self._stream, l)
175 fname = readexactly(self._stream, l)
176 return dict(filename=fname)
176 return dict(filename=fname)
177
177
178 def _deltaheader(self, headertuple, prevnode):
178 def _deltaheader(self, headertuple, prevnode):
179 node, p1, p2, cs = headertuple
179 node, p1, p2, cs = headertuple
180 if prevnode is None:
180 if prevnode is None:
181 deltabase = p1
181 deltabase = p1
182 else:
182 else:
183 deltabase = prevnode
183 deltabase = prevnode
184 return node, p1, p2, deltabase, cs
184 return node, p1, p2, deltabase, cs
185
185
186 def deltachunk(self, prevnode):
186 def deltachunk(self, prevnode):
187 l = self.chunklength()
187 l = self.chunklength()
188 if not l:
188 if not l:
189 return {}
189 return {}
190 headerdata = readexactly(self._stream, self.deltaheadersize)
190 headerdata = readexactly(self._stream, self.deltaheadersize)
191 header = struct.unpack(self.deltaheader, headerdata)
191 header = struct.unpack(self.deltaheader, headerdata)
192 delta = readexactly(self._stream, l - self.deltaheadersize)
192 delta = readexactly(self._stream, l - self.deltaheadersize)
193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
194 return dict(node=node, p1=p1, p2=p2, cs=cs,
194 return dict(node=node, p1=p1, p2=p2, cs=cs,
195 deltabase=deltabase, delta=delta)
195 deltabase=deltabase, delta=delta)
196
196
197 class headerlessfixup(object):
197 class headerlessfixup(object):
198 def __init__(self, fh, h):
198 def __init__(self, fh, h):
199 self._h = h
199 self._h = h
200 self._fh = fh
200 self._fh = fh
201 def read(self, n):
201 def read(self, n):
202 if self._h:
202 if self._h:
203 d, self._h = self._h[:n], self._h[n:]
203 d, self._h = self._h[:n], self._h[n:]
204 if len(d) < n:
204 if len(d) < n:
205 d += readexactly(self._fh, n - len(d))
205 d += readexactly(self._fh, n - len(d))
206 return d
206 return d
207 return readexactly(self._fh, n)
207 return readexactly(self._fh, n)
208
208
209 def readbundle(fh, fname):
209 def readbundle(fh, fname):
210 header = readexactly(fh, 6)
210 header = readexactly(fh, 6)
211
211
212 if not fname:
212 if not fname:
213 fname = "stream"
213 fname = "stream"
214 if not header.startswith('HG') and header.startswith('\0'):
214 if not header.startswith('HG') and header.startswith('\0'):
215 fh = headerlessfixup(fh, header)
215 fh = headerlessfixup(fh, header)
216 header = "HG10UN"
216 header = "HG10UN"
217
217
218 magic, version, alg = header[0:2], header[2:4], header[4:6]
218 magic, version, alg = header[0:2], header[2:4], header[4:6]
219
219
220 if magic != 'HG':
220 if magic != 'HG':
221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
222 if version != '10':
222 if version != '10':
223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
224 return unbundle10(fh, alg)
224 return unbundle10(fh, alg)
225
225
226 class bundle10(object):
226 class bundle10(object):
227 deltaheader = _BUNDLE10_DELTA_HEADER
227 deltaheader = _BUNDLE10_DELTA_HEADER
228 def __init__(self, repo, bundlecaps=None):
228 def __init__(self, repo, bundlecaps=None):
229 """Given a source repo, construct a bundler.
229 """Given a source repo, construct a bundler.
230
230
231 bundlecaps is optional and can be used to specify the set of
231 bundlecaps is optional and can be used to specify the set of
232 capabilities which can be used to build the bundle.
232 capabilities which can be used to build the bundle.
233 """
233 """
234 # Set of capabilities we can use to build the bundle.
234 # Set of capabilities we can use to build the bundle.
235 if bundlecaps is None:
235 if bundlecaps is None:
236 bundlecaps = set()
236 bundlecaps = set()
237 self._bundlecaps = bundlecaps
237 self._bundlecaps = bundlecaps
238 self._changelog = repo.changelog
238 self._changelog = repo.changelog
239 self._manifest = repo.manifest
239 self._manifest = repo.manifest
240 reorder = repo.ui.config('bundle', 'reorder', 'auto')
240 reorder = repo.ui.config('bundle', 'reorder', 'auto')
241 if reorder == 'auto':
241 if reorder == 'auto':
242 reorder = None
242 reorder = None
243 else:
243 else:
244 reorder = util.parsebool(reorder)
244 reorder = util.parsebool(reorder)
245 self._repo = repo
245 self._repo = repo
246 self._reorder = reorder
246 self._reorder = reorder
247 self._progress = repo.ui.progress
247 self._progress = repo.ui.progress
248 def close(self):
248 def close(self):
249 return closechunk()
249 return closechunk()
250
250
251 def fileheader(self, fname):
251 def fileheader(self, fname):
252 return chunkheader(len(fname)) + fname
252 return chunkheader(len(fname)) + fname
253
253
254 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
254 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
255 """Calculate a delta group, yielding a sequence of changegroup chunks
255 """Calculate a delta group, yielding a sequence of changegroup chunks
256 (strings).
256 (strings).
257
257
258 Given a list of changeset revs, return a set of deltas and
258 Given a list of changeset revs, return a set of deltas and
259 metadata corresponding to nodes. The first delta is
259 metadata corresponding to nodes. The first delta is
260 first parent(nodelist[0]) -> nodelist[0], the receiver is
260 first parent(nodelist[0]) -> nodelist[0], the receiver is
261 guaranteed to have this parent as it has all history before
261 guaranteed to have this parent as it has all history before
262 these changesets. In the case firstparent is nullrev the
262 these changesets. In the case firstparent is nullrev the
263 changegroup starts with a full revision.
263 changegroup starts with a full revision.
264
264
265 If units is not None, progress detail will be generated, units specifies
265 If units is not None, progress detail will be generated, units specifies
266 the type of revlog that is touched (changelog, manifest, etc.).
266 the type of revlog that is touched (changelog, manifest, etc.).
267 """
267 """
268 # if we don't have any revisions touched by these changesets, bail
268 # if we don't have any revisions touched by these changesets, bail
269 if len(nodelist) == 0:
269 if len(nodelist) == 0:
270 yield self.close()
270 yield self.close()
271 return
271 return
272
272
273 # for generaldelta revlogs, we linearize the revs; this will both be
273 # for generaldelta revlogs, we linearize the revs; this will both be
274 # much quicker and generate a much smaller bundle
274 # much quicker and generate a much smaller bundle
275 if (revlog._generaldelta and reorder is not False) or reorder:
275 if (revlog._generaldelta and reorder is not False) or reorder:
276 dag = dagutil.revlogdag(revlog)
276 dag = dagutil.revlogdag(revlog)
277 revs = set(revlog.rev(n) for n in nodelist)
277 revs = set(revlog.rev(n) for n in nodelist)
278 revs = dag.linearize(revs)
278 revs = dag.linearize(revs)
279 else:
279 else:
280 revs = sorted([revlog.rev(n) for n in nodelist])
280 revs = sorted([revlog.rev(n) for n in nodelist])
281
281
282 # add the parent of the first rev
282 # add the parent of the first rev
283 p = revlog.parentrevs(revs[0])[0]
283 p = revlog.parentrevs(revs[0])[0]
284 revs.insert(0, p)
284 revs.insert(0, p)
285
285
286 # build deltas
286 # build deltas
287 total = len(revs) - 1
287 total = len(revs) - 1
288 msgbundling = _('bundling')
288 msgbundling = _('bundling')
289 for r in xrange(len(revs) - 1):
289 for r in xrange(len(revs) - 1):
290 if units is not None:
290 if units is not None:
291 self._progress(msgbundling, r + 1, unit=units, total=total)
291 self._progress(msgbundling, r + 1, unit=units, total=total)
292 prev, curr = revs[r], revs[r + 1]
292 prev, curr = revs[r], revs[r + 1]
293 linknode = lookup(revlog.node(curr))
293 linknode = lookup(revlog.node(curr))
294 for c in self.revchunk(revlog, curr, prev, linknode):
294 for c in self.revchunk(revlog, curr, prev, linknode):
295 yield c
295 yield c
296
296
297 yield self.close()
297 yield self.close()
298
298
299 # filter any nodes that claim to be part of the known set
299 # filter any nodes that claim to be part of the known set
300 def prune(self, revlog, missing, commonrevs, source):
300 def prune(self, revlog, missing, commonrevs, source):
301 rr, rl = revlog.rev, revlog.linkrev
301 rr, rl = revlog.rev, revlog.linkrev
302 return [n for n in missing if rl(rr(n)) not in commonrevs]
302 return [n for n in missing if rl(rr(n)) not in commonrevs]
303
303
304 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
304 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
305 '''yield a sequence of changegroup chunks (strings)'''
305 '''yield a sequence of changegroup chunks (strings)'''
306 repo = self._repo
306 repo = self._repo
307 cl = self._changelog
307 cl = self._changelog
308 mf = self._manifest
308 mf = self._manifest
309 reorder = self._reorder
309 reorder = self._reorder
310 progress = self._progress
310 progress = self._progress
311
311
312 # for progress output
312 # for progress output
313 msgbundling = _('bundling')
313 msgbundling = _('bundling')
314
314
315 mfs = {} # needed manifests
315 mfs = {} # needed manifests
316 fnodes = {} # needed file nodes
316 fnodes = {} # needed file nodes
317 changedfiles = set()
317 changedfiles = set()
318
318
319 # Callback for the changelog, used to collect changed files and manifest
319 # Callback for the changelog, used to collect changed files and manifest
320 # nodes.
320 # nodes.
321 # Returns the linkrev node (identity in the changelog case).
321 # Returns the linkrev node (identity in the changelog case).
322 def lookupcl(x):
322 def lookupcl(x):
323 c = cl.read(x)
323 c = cl.read(x)
324 changedfiles.update(c[3])
324 changedfiles.update(c[3])
325 # record the first changeset introducing this manifest version
325 # record the first changeset introducing this manifest version
326 mfs.setdefault(c[0], x)
326 mfs.setdefault(c[0], x)
327 return x
327 return x
328
328
329 # Callback for the manifest, used to collect linkrevs for filelog
329 # Callback for the manifest, used to collect linkrevs for filelog
330 # revisions.
330 # revisions.
331 # Returns the linkrev node (collected in lookupcl).
331 # Returns the linkrev node (collected in lookupcl).
332 def lookupmf(x):
332 def lookupmf(x):
333 clnode = mfs[x]
333 clnode = mfs[x]
334 if not fastpathlinkrev:
334 if not fastpathlinkrev:
335 mdata = mf.readfast(x)
335 mdata = mf.readfast(x)
336 for f, n in mdata.iteritems():
336 for f, n in mdata.iteritems():
337 if f in changedfiles:
337 if f in changedfiles:
338 # record the first changeset introducing this filelog
338 # record the first changeset introducing this filelog
339 # version
339 # version
340 fnodes[f].setdefault(n, clnode)
340 fnodes[f].setdefault(n, clnode)
341 return clnode
341 return clnode
342
342
343 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
343 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
344 reorder=reorder):
344 reorder=reorder):
345 yield chunk
345 yield chunk
346 progress(msgbundling, None)
346 progress(msgbundling, None)
347
347
348 for f in changedfiles:
348 for f in changedfiles:
349 fnodes[f] = {}
349 fnodes[f] = {}
350 mfnodes = self.prune(mf, mfs, commonrevs, source)
350 mfnodes = self.prune(mf, mfs, commonrevs, source)
351 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
351 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
352 reorder=reorder):
352 reorder=reorder):
353 yield chunk
353 yield chunk
354 progress(msgbundling, None)
354 progress(msgbundling, None)
355
355
356 mfs.clear()
356 mfs.clear()
357 total = len(changedfiles)
357 total = len(changedfiles)
358 # for progress output
358 # for progress output
359 msgfiles = _('files')
359 msgfiles = _('files')
360 for i, fname in enumerate(sorted(changedfiles)):
360 for i, fname in enumerate(sorted(changedfiles)):
361 filerevlog = repo.file(fname)
361 filerevlog = repo.file(fname)
362 if not len(filerevlog):
362 if not filerevlog:
363 raise util.Abort(_("empty or missing revlog for %s") % fname)
363 raise util.Abort(_("empty or missing revlog for %s") % fname)
364
364
365 if fastpathlinkrev:
365 if fastpathlinkrev:
366 ln, llr = filerevlog.node, filerevlog.linkrev
366 ln, llr = filerevlog.node, filerevlog.linkrev
367 def genfilenodes():
367 def genfilenodes():
368 for r in filerevlog:
368 for r in filerevlog:
369 linkrev = llr(r)
369 linkrev = llr(r)
370 if linkrev not in commonrevs:
370 if linkrev not in commonrevs:
371 yield filerevlog.node(r), cl.node(linkrev)
371 yield filerevlog.node(r), cl.node(linkrev)
372 fnodes[fname] = dict(genfilenodes())
372 fnodes[fname] = dict(genfilenodes())
373
373
374 linkrevnodes = fnodes.pop(fname, {})
374 linkrevnodes = fnodes.pop(fname, {})
375 # Lookup for filenodes, we collected the linkrev nodes above in the
375 # Lookup for filenodes, we collected the linkrev nodes above in the
376 # fastpath case and with lookupmf in the slowpath case.
376 # fastpath case and with lookupmf in the slowpath case.
377 def lookupfilelog(x):
377 def lookupfilelog(x):
378 return linkrevnodes[x]
378 return linkrevnodes[x]
379
379
380 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
380 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
381 if filenodes:
381 if filenodes:
382 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
382 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
383 total=total)
383 total=total)
384 yield self.fileheader(fname)
384 yield self.fileheader(fname)
385 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
385 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
386 reorder=reorder):
386 reorder=reorder):
387 yield chunk
387 yield chunk
388 yield self.close()
388 yield self.close()
389 progress(msgbundling, None)
389 progress(msgbundling, None)
390
390
391 if clnodes:
391 if clnodes:
392 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
392 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
393
393
394 def revchunk(self, revlog, rev, prev, linknode):
394 def revchunk(self, revlog, rev, prev, linknode):
395 node = revlog.node(rev)
395 node = revlog.node(rev)
396 p1, p2 = revlog.parentrevs(rev)
396 p1, p2 = revlog.parentrevs(rev)
397 base = prev
397 base = prev
398
398
399 prefix = ''
399 prefix = ''
400 if base == nullrev:
400 if base == nullrev:
401 delta = revlog.revision(node)
401 delta = revlog.revision(node)
402 prefix = mdiff.trivialdiffheader(len(delta))
402 prefix = mdiff.trivialdiffheader(len(delta))
403 else:
403 else:
404 delta = revlog.revdiff(base, rev)
404 delta = revlog.revdiff(base, rev)
405 p1n, p2n = revlog.parents(node)
405 p1n, p2n = revlog.parents(node)
406 basenode = revlog.node(base)
406 basenode = revlog.node(base)
407 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
407 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
408 meta += prefix
408 meta += prefix
409 l = len(meta) + len(delta)
409 l = len(meta) + len(delta)
410 yield chunkheader(l)
410 yield chunkheader(l)
411 yield meta
411 yield meta
412 yield delta
412 yield delta
413 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
413 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
414 # do nothing with basenode, it is implicitly the previous one in HG10
414 # do nothing with basenode, it is implicitly the previous one in HG10
415 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
415 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
@@ -1,2105 +1,2105 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile
10 import os, sys, errno, re, tempfile
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import subrepo, context, repair, graphmod, revset, phases, obsolete
13 import subrepo, context, repair, graphmod, revset, phases, obsolete
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import lock as lockmod
16 import lock as lockmod
17
17
18 def parsealiases(cmd):
18 def parsealiases(cmd):
19 return cmd.lstrip("^").split("|")
19 return cmd.lstrip("^").split("|")
20
20
21 def findpossible(cmd, table, strict=False):
21 def findpossible(cmd, table, strict=False):
22 """
22 """
23 Return cmd -> (aliases, command table entry)
23 Return cmd -> (aliases, command table entry)
24 for each matching command.
24 for each matching command.
25 Return debug commands (or their aliases) only if no normal command matches.
25 Return debug commands (or their aliases) only if no normal command matches.
26 """
26 """
27 choice = {}
27 choice = {}
28 debugchoice = {}
28 debugchoice = {}
29
29
30 if cmd in table:
30 if cmd in table:
31 # short-circuit exact matches, "log" alias beats "^log|history"
31 # short-circuit exact matches, "log" alias beats "^log|history"
32 keys = [cmd]
32 keys = [cmd]
33 else:
33 else:
34 keys = table.keys()
34 keys = table.keys()
35
35
36 for e in keys:
36 for e in keys:
37 aliases = parsealiases(e)
37 aliases = parsealiases(e)
38 found = None
38 found = None
39 if cmd in aliases:
39 if cmd in aliases:
40 found = cmd
40 found = cmd
41 elif not strict:
41 elif not strict:
42 for a in aliases:
42 for a in aliases:
43 if a.startswith(cmd):
43 if a.startswith(cmd):
44 found = a
44 found = a
45 break
45 break
46 if found is not None:
46 if found is not None:
47 if aliases[0].startswith("debug") or found.startswith("debug"):
47 if aliases[0].startswith("debug") or found.startswith("debug"):
48 debugchoice[found] = (aliases, table[e])
48 debugchoice[found] = (aliases, table[e])
49 else:
49 else:
50 choice[found] = (aliases, table[e])
50 choice[found] = (aliases, table[e])
51
51
52 if not choice and debugchoice:
52 if not choice and debugchoice:
53 choice = debugchoice
53 choice = debugchoice
54
54
55 return choice
55 return choice
56
56
57 def findcmd(cmd, table, strict=True):
57 def findcmd(cmd, table, strict=True):
58 """Return (aliases, command table entry) for command string."""
58 """Return (aliases, command table entry) for command string."""
59 choice = findpossible(cmd, table, strict)
59 choice = findpossible(cmd, table, strict)
60
60
61 if cmd in choice:
61 if cmd in choice:
62 return choice[cmd]
62 return choice[cmd]
63
63
64 if len(choice) > 1:
64 if len(choice) > 1:
65 clist = choice.keys()
65 clist = choice.keys()
66 clist.sort()
66 clist.sort()
67 raise error.AmbiguousCommand(cmd, clist)
67 raise error.AmbiguousCommand(cmd, clist)
68
68
69 if choice:
69 if choice:
70 return choice.values()[0]
70 return choice.values()[0]
71
71
72 raise error.UnknownCommand(cmd)
72 raise error.UnknownCommand(cmd)
73
73
74 def findrepo(p):
74 def findrepo(p):
75 while not os.path.isdir(os.path.join(p, ".hg")):
75 while not os.path.isdir(os.path.join(p, ".hg")):
76 oldp, p = p, os.path.dirname(p)
76 oldp, p = p, os.path.dirname(p)
77 if p == oldp:
77 if p == oldp:
78 return None
78 return None
79
79
80 return p
80 return p
81
81
82 def bailifchanged(repo):
82 def bailifchanged(repo):
83 if repo.dirstate.p2() != nullid:
83 if repo.dirstate.p2() != nullid:
84 raise util.Abort(_('outstanding uncommitted merge'))
84 raise util.Abort(_('outstanding uncommitted merge'))
85 modified, added, removed, deleted = repo.status()[:4]
85 modified, added, removed, deleted = repo.status()[:4]
86 if modified or added or removed or deleted:
86 if modified or added or removed or deleted:
87 raise util.Abort(_("outstanding uncommitted changes"))
87 raise util.Abort(_("outstanding uncommitted changes"))
88 ctx = repo[None]
88 ctx = repo[None]
89 for s in sorted(ctx.substate):
89 for s in sorted(ctx.substate):
90 if ctx.sub(s).dirty():
90 if ctx.sub(s).dirty():
91 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
91 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
92
92
93 def logmessage(ui, opts):
93 def logmessage(ui, opts):
94 """ get the log message according to -m and -l option """
94 """ get the log message according to -m and -l option """
95 message = opts.get('message')
95 message = opts.get('message')
96 logfile = opts.get('logfile')
96 logfile = opts.get('logfile')
97
97
98 if message and logfile:
98 if message and logfile:
99 raise util.Abort(_('options --message and --logfile are mutually '
99 raise util.Abort(_('options --message and --logfile are mutually '
100 'exclusive'))
100 'exclusive'))
101 if not message and logfile:
101 if not message and logfile:
102 try:
102 try:
103 if logfile == '-':
103 if logfile == '-':
104 message = ui.fin.read()
104 message = ui.fin.read()
105 else:
105 else:
106 message = '\n'.join(util.readfile(logfile).splitlines())
106 message = '\n'.join(util.readfile(logfile).splitlines())
107 except IOError, inst:
107 except IOError, inst:
108 raise util.Abort(_("can't read commit message '%s': %s") %
108 raise util.Abort(_("can't read commit message '%s': %s") %
109 (logfile, inst.strerror))
109 (logfile, inst.strerror))
110 return message
110 return message
111
111
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    raw = opts.get('limit')
    if not raw:
        # absent, empty, or zero all mean "no limit"
        return None
    try:
        value = int(raw)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if value <= 0:
        raise util.Abort(_('limit must be positive'))
    return value
125
125
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    '''expand format specifiers in an output-filename pattern

    Supported specifiers (availability depends on the arguments given):
      %%  literal '%'
      %b  basename of the repository root
      %H  full changeset hash          (requires node)
      %R  changeset revision number    (requires node)
      %h  short changeset hash         (requires node)
      %r  revision zero-padded to revwidth (requires node)
      %m  first line of desc with non-word characters replaced by '_'
      %N  total number of patches      (requires total)
      %n  sequence number, zero-padded to the width of total when known
      %s / %d / %p  basename / dirname / full path of pathname

    Raises util.Abort on an unknown specifier for the given arguments.
    '''
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        # raw string: the pattern is a regex, keep \w as a regex escape
        'm': lambda: re.sub(r'[^\w]', '_', str(desc))
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        # node-dependent specifiers are only installed when a node is given
        # (the two separate 'if node:' checks were merged into one)
        if node:
            expander.update(node_expander)
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # with both known, pad the sequence number to the total's width
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
171
171
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap={},
                pathname=None):
    '''return a file-like object for the output pattern pat

    An empty pat or '-' maps to the ui's stdout (when mode is writable)
    or stdin; a pat that is already a file-like object is returned
    as-is; otherwise pat is expanded through makefilename() and the
    resulting path is opened with the (possibly remapped) mode.

    NOTE(review): modemap is a mutable default argument and is mutated
    below (first 'wb' open of a name flips it to 'ab' so later writes
    append) — presumably callers always pass their own dict; verify.
    '''

    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        # no pattern / '-' means the ui's standard stream
        fp = writable and repo.ui.fout or repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            # hand back a dup so the caller may close it freely
            return os.fdopen(os.dup(fp.fileno()), mode)
        else:
            # if this fp can't be duped properly, return
            # a dummy object that can be closed
            class wrappedfileobj(object):
                noop = lambda x: None
                def __init__(self, f):
                    self.f = f
                def __getattr__(self, attr):
                    # swallow close(); proxy everything else to the stream
                    if attr == 'close':
                        return self.noop
                    else:
                        return getattr(self.f, attr)

            return wrappedfileobj(fp)
    # pat may itself already be an open file object of the right direction
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    mode = modemap.get(fn, mode)
    if mode == 'wb':
        # first write truncates; remember to append on subsequent opens
        modemap[fn] = 'ab'
    return open(fn, mode)
205
205
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    opts['changelog'] / opts['manifest'] select those revlogs; otherwise
    file_ names a tracked file (preferred) or a raw .i/.d revlog path.
    Raises error.CommandError or util.Abort on invalid combinations.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    msg = None
    # validate the option combination before touching anything
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.changelog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            # only use the filelog if it actually has revisions
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise util.Abort(_("revlog '%s' not found") % file_)
        # fall back to opening the path as a bare revlog index
        # (strip the 2-char extension and append '.i')
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
240
240
def copy(ui, repo, pats, opts, rename=False):
    '''copy (or, with rename=True, move) files matching pats to the last
    pattern, which is the destination; returns True if any file failed.
    '''
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, for collision detection
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about (and skipping) unmanaged or removed files
        srcs = []
        badstates = after and '?' or '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                # only complain for names the user spelled out explicitly
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform (or record) a single copy; returns True on failure
        abstarget = scmutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            # case-insensitive filesystems: same file under another case
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records copies that already happened on disk
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temp name so the
                    # filesystem actually changes the stored case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            # copying a directory: strip its parent (or the dir itself
            # when dest does not exist) from each source path
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        # like targetpathfn, but the sources may no longer exist on disk,
        # so infer the layout from what already exists under dest
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist under dest
                    # when stripped at striplen
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    # try both candidate strip points; keep the one that
                    # matches more already-existing targets
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    # the last pattern is the destination; the rest are sources
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
466
466
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With opts['daemon'] set (and no pipefds yet), re-execute the current
    command detached and wait for the child's startup handshake, then
    return parentfn(pid) if given. Otherwise: call initfn(), write the
    pid file if requested, complete the daemon handshake (setsid, remove
    the lock file, redirect stdio to logfile/devnull) when
    opts['daemon_pipefds'] is set, and finally return runfn().
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child deletes lockpath once it has started up
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            # best-effort cleanup; the child normally removed it already
            try:
                os.unlink(lockpath)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # we are the detached child: finish daemonizing
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        # removing the lock file tells the parent we are up
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdin to /dev/null, stdout/stderr to the log (or null)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
538
538
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    Each revision in revs is written as a "# HG changeset patch" header
    plus its diff, either to fp (when given), to a file named by
    expanding template per revision, or to the ui when template is
    empty. switch_parent diffs against the second parent. opts are
    passed through to the diff machinery.
    '''

    total = len(revs)
    # widest revision number, used to zero-pad %r in the template
    revwidth = max([len(str(rev)) for rev in revs])
    # shared across single() calls so repeated template names append
    filemode = {}

    def single(rev, seqno, fp):
        # write one changeset; fp may be None (open per template)
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            # only close files we opened ourselves
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                # drop ui labels when writing to a plain file
                fp.write(s)


        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
596
596
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.

    Writes the diff between node1 and node2 (restricted by match) to fp,
    or to the ui when fp is None. stat=True emits a diffstat instead;
    listsubrepos recurses into subrepositories.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # plain file output: ignore ui labels
            fp.write(s)

    if stat:
        # diffstat needs no context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
639
639
640 class changeset_printer(object):
640 class changeset_printer(object):
641 '''show changeset information when templating not requested.'''
641 '''show changeset information when templating not requested.'''
642
642
    def __init__(self, ui, repo, patch, diffopts, buffered):
        """Set up output state; buffered defers writes until flush()."""
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.diffopts = diffopts
        self.header = {}        # rev -> buffered header text
        self.hunk = {}          # rev -> buffered changeset text
        self.lastheader = None  # last header written, to avoid repeats
        self.footer = None      # emitted by close()
653
653
654 def flush(self, rev):
654 def flush(self, rev):
655 if rev in self.header:
655 if rev in self.header:
656 h = self.header[rev]
656 h = self.header[rev]
657 if h != self.lastheader:
657 if h != self.lastheader:
658 self.lastheader = h
658 self.lastheader = h
659 self.ui.write(h)
659 self.ui.write(h)
660 del self.header[rev]
660 del self.header[rev]
661 if rev in self.hunk:
661 if rev in self.hunk:
662 self.ui.write(self.hunk[rev])
662 self.ui.write(self.hunk[rev])
663 del self.hunk[rev]
663 del self.hunk[rev]
664 return 1
664 return 1
665 return 0
665 return 0
666
666
667 def close(self):
667 def close(self):
668 if self.footer:
668 if self.footer:
669 self.ui.write(self.footer)
669 self.ui.write(self.footer)
670
670
671 def show(self, ctx, copies=None, matchfn=None, **props):
671 def show(self, ctx, copies=None, matchfn=None, **props):
672 if self.buffered:
672 if self.buffered:
673 self.ui.pushbuffer()
673 self.ui.pushbuffer()
674 self._show(ctx, copies, matchfn, props)
674 self._show(ctx, copies, matchfn, props)
675 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
675 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
676 else:
676 else:
677 self._show(ctx, copies, matchfn, props)
677 self._show(ctx, copies, matchfn, props)
678
678
679 def _show(self, ctx, copies, matchfn, props):
679 def _show(self, ctx, copies, matchfn, props):
680 '''show a single changeset or file revision'''
680 '''show a single changeset or file revision'''
681 changenode = ctx.node()
681 changenode = ctx.node()
682 rev = ctx.rev()
682 rev = ctx.rev()
683
683
684 if self.ui.quiet:
684 if self.ui.quiet:
685 self.ui.write("%d:%s\n" % (rev, short(changenode)),
685 self.ui.write("%d:%s\n" % (rev, short(changenode)),
686 label='log.node')
686 label='log.node')
687 return
687 return
688
688
689 log = self.repo.changelog
689 log = self.repo.changelog
690 date = util.datestr(ctx.date())
690 date = util.datestr(ctx.date())
691
691
692 hexfunc = self.ui.debugflag and hex or short
692 hexfunc = self.ui.debugflag and hex or short
693
693
694 parents = [(p, hexfunc(log.node(p)))
694 parents = [(p, hexfunc(log.node(p)))
695 for p in self._meaningful_parentrevs(log, rev)]
695 for p in self._meaningful_parentrevs(log, rev)]
696
696
697 # i18n: column positioning for "hg log"
697 # i18n: column positioning for "hg log"
698 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
698 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
699 label='log.changeset changeset.%s' % ctx.phasestr())
699 label='log.changeset changeset.%s' % ctx.phasestr())
700
700
701 branch = ctx.branch()
701 branch = ctx.branch()
702 # don't show the default branch name
702 # don't show the default branch name
703 if branch != 'default':
703 if branch != 'default':
704 # i18n: column positioning for "hg log"
704 # i18n: column positioning for "hg log"
705 self.ui.write(_("branch: %s\n") % branch,
705 self.ui.write(_("branch: %s\n") % branch,
706 label='log.branch')
706 label='log.branch')
707 for bookmark in self.repo.nodebookmarks(changenode):
707 for bookmark in self.repo.nodebookmarks(changenode):
708 # i18n: column positioning for "hg log"
708 # i18n: column positioning for "hg log"
709 self.ui.write(_("bookmark: %s\n") % bookmark,
709 self.ui.write(_("bookmark: %s\n") % bookmark,
710 label='log.bookmark')
710 label='log.bookmark')
711 for tag in self.repo.nodetags(changenode):
711 for tag in self.repo.nodetags(changenode):
712 # i18n: column positioning for "hg log"
712 # i18n: column positioning for "hg log"
713 self.ui.write(_("tag: %s\n") % tag,
713 self.ui.write(_("tag: %s\n") % tag,
714 label='log.tag')
714 label='log.tag')
715 if self.ui.debugflag and ctx.phase():
715 if self.ui.debugflag and ctx.phase():
716 # i18n: column positioning for "hg log"
716 # i18n: column positioning for "hg log"
717 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
717 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
718 label='log.phase')
718 label='log.phase')
719 for parent in parents:
719 for parent in parents:
720 # i18n: column positioning for "hg log"
720 # i18n: column positioning for "hg log"
721 self.ui.write(_("parent: %d:%s\n") % parent,
721 self.ui.write(_("parent: %d:%s\n") % parent,
722 label='log.parent changeset.%s' % ctx.phasestr())
722 label='log.parent changeset.%s' % ctx.phasestr())
723
723
724 if self.ui.debugflag:
724 if self.ui.debugflag:
725 mnode = ctx.manifestnode()
725 mnode = ctx.manifestnode()
726 # i18n: column positioning for "hg log"
726 # i18n: column positioning for "hg log"
727 self.ui.write(_("manifest: %d:%s\n") %
727 self.ui.write(_("manifest: %d:%s\n") %
728 (self.repo.manifest.rev(mnode), hex(mnode)),
728 (self.repo.manifest.rev(mnode), hex(mnode)),
729 label='ui.debug log.manifest')
729 label='ui.debug log.manifest')
730 # i18n: column positioning for "hg log"
730 # i18n: column positioning for "hg log"
731 self.ui.write(_("user: %s\n") % ctx.user(),
731 self.ui.write(_("user: %s\n") % ctx.user(),
732 label='log.user')
732 label='log.user')
733 # i18n: column positioning for "hg log"
733 # i18n: column positioning for "hg log"
734 self.ui.write(_("date: %s\n") % date,
734 self.ui.write(_("date: %s\n") % date,
735 label='log.date')
735 label='log.date')
736
736
737 if self.ui.debugflag:
737 if self.ui.debugflag:
738 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
738 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
739 for key, value in zip([# i18n: column positioning for "hg log"
739 for key, value in zip([# i18n: column positioning for "hg log"
740 _("files:"),
740 _("files:"),
741 # i18n: column positioning for "hg log"
741 # i18n: column positioning for "hg log"
742 _("files+:"),
742 _("files+:"),
743 # i18n: column positioning for "hg log"
743 # i18n: column positioning for "hg log"
744 _("files-:")], files):
744 _("files-:")], files):
745 if value:
745 if value:
746 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
746 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
747 label='ui.debug log.files')
747 label='ui.debug log.files')
748 elif ctx.files() and self.ui.verbose:
748 elif ctx.files() and self.ui.verbose:
749 # i18n: column positioning for "hg log"
749 # i18n: column positioning for "hg log"
750 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
750 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
751 label='ui.note log.files')
751 label='ui.note log.files')
752 if copies and self.ui.verbose:
752 if copies and self.ui.verbose:
753 copies = ['%s (%s)' % c for c in copies]
753 copies = ['%s (%s)' % c for c in copies]
754 # i18n: column positioning for "hg log"
754 # i18n: column positioning for "hg log"
755 self.ui.write(_("copies: %s\n") % ' '.join(copies),
755 self.ui.write(_("copies: %s\n") % ' '.join(copies),
756 label='ui.note log.copies')
756 label='ui.note log.copies')
757
757
758 extra = ctx.extra()
758 extra = ctx.extra()
759 if extra and self.ui.debugflag:
759 if extra and self.ui.debugflag:
760 for key, value in sorted(extra.items()):
760 for key, value in sorted(extra.items()):
761 # i18n: column positioning for "hg log"
761 # i18n: column positioning for "hg log"
762 self.ui.write(_("extra: %s=%s\n")
762 self.ui.write(_("extra: %s=%s\n")
763 % (key, value.encode('string_escape')),
763 % (key, value.encode('string_escape')),
764 label='ui.debug log.extra')
764 label='ui.debug log.extra')
765
765
766 description = ctx.description().strip()
766 description = ctx.description().strip()
767 if description:
767 if description:
768 if self.ui.verbose:
768 if self.ui.verbose:
769 self.ui.write(_("description:\n"),
769 self.ui.write(_("description:\n"),
770 label='ui.note log.description')
770 label='ui.note log.description')
771 self.ui.write(description,
771 self.ui.write(description,
772 label='ui.note log.description')
772 label='ui.note log.description')
773 self.ui.write("\n\n")
773 self.ui.write("\n\n")
774 else:
774 else:
775 # i18n: column positioning for "hg log"
775 # i18n: column positioning for "hg log"
776 self.ui.write(_("summary: %s\n") %
776 self.ui.write(_("summary: %s\n") %
777 description.splitlines()[0],
777 description.splitlines()[0],
778 label='log.summary')
778 label='log.summary')
779 self.ui.write("\n")
779 self.ui.write("\n")
780
780
781 self.showpatch(changenode, matchfn)
781 self.showpatch(changenode, matchfn)
782
782
783 def showpatch(self, node, matchfn):
783 def showpatch(self, node, matchfn):
784 if not matchfn:
784 if not matchfn:
785 matchfn = self.patch
785 matchfn = self.patch
786 if matchfn:
786 if matchfn:
787 stat = self.diffopts.get('stat')
787 stat = self.diffopts.get('stat')
788 diff = self.diffopts.get('patch')
788 diff = self.diffopts.get('patch')
789 diffopts = patch.diffopts(self.ui, self.diffopts)
789 diffopts = patch.diffopts(self.ui, self.diffopts)
790 prev = self.repo.changelog.parents(node)[0]
790 prev = self.repo.changelog.parents(node)[0]
791 if stat:
791 if stat:
792 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
792 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
793 match=matchfn, stat=True)
793 match=matchfn, stat=True)
794 if diff:
794 if diff:
795 if stat:
795 if stat:
796 self.ui.write("\n")
796 self.ui.write("\n")
797 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
797 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
798 match=matchfn, stat=False)
798 match=matchfn, stat=False)
799 self.ui.write("\n")
799 self.ui.write("\n")
800
800
801 def _meaningful_parentrevs(self, log, rev):
801 def _meaningful_parentrevs(self, log, rev):
802 """Return list of meaningful (or all if debug) parentrevs for rev.
802 """Return list of meaningful (or all if debug) parentrevs for rev.
803
803
804 For merges (two non-nullrev revisions) both parents are meaningful.
804 For merges (two non-nullrev revisions) both parents are meaningful.
805 Otherwise the first parent revision is considered meaningful if it
805 Otherwise the first parent revision is considered meaningful if it
806 is not the preceding revision.
806 is not the preceding revision.
807 """
807 """
808 parents = log.parentrevs(rev)
808 parents = log.parentrevs(rev)
809 if not self.ui.debugflag and parents[1] == nullrev:
809 if not self.ui.debugflag and parents[1] == nullrev:
810 if parents[0] >= rev - 1:
810 if parents[0] >= rev - 1:
811 parents = []
811 parents = []
812 else:
812 else:
813 parents = [parents[0]]
813 parents = [parents[0]]
814 return parents
814 return parents
815
815
816
816
817 class changeset_templater(changeset_printer):
817 class changeset_templater(changeset_printer):
818 '''format changeset information.'''
818 '''format changeset information.'''
819
819
820 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
820 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
821 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
821 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
822 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
822 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
823 defaulttempl = {
823 defaulttempl = {
824 'parent': '{rev}:{node|formatnode} ',
824 'parent': '{rev}:{node|formatnode} ',
825 'manifest': '{rev}:{node|formatnode}',
825 'manifest': '{rev}:{node|formatnode}',
826 'file_copy': '{name} ({source})',
826 'file_copy': '{name} ({source})',
827 'extra': '{key}={value|stringescape}'
827 'extra': '{key}={value|stringescape}'
828 }
828 }
829 # filecopy is preserved for compatibility reasons
829 # filecopy is preserved for compatibility reasons
830 defaulttempl['filecopy'] = defaulttempl['file_copy']
830 defaulttempl['filecopy'] = defaulttempl['file_copy']
831 self.t = templater.templater(mapfile, {'formatnode': formatnode},
831 self.t = templater.templater(mapfile, {'formatnode': formatnode},
832 cache=defaulttempl)
832 cache=defaulttempl)
833 self.cache = {}
833 self.cache = {}
834
834
835 def use_template(self, t):
835 def use_template(self, t):
836 '''set template string to use'''
836 '''set template string to use'''
837 self.t.cache['changeset'] = t
837 self.t.cache['changeset'] = t
838
838
839 def _meaningful_parentrevs(self, ctx):
839 def _meaningful_parentrevs(self, ctx):
840 """Return list of meaningful (or all if debug) parentrevs for rev.
840 """Return list of meaningful (or all if debug) parentrevs for rev.
841 """
841 """
842 parents = ctx.parents()
842 parents = ctx.parents()
843 if len(parents) > 1:
843 if len(parents) > 1:
844 return parents
844 return parents
845 if self.ui.debugflag:
845 if self.ui.debugflag:
846 return [parents[0], self.repo['null']]
846 return [parents[0], self.repo['null']]
847 if parents[0].rev() >= ctx.rev() - 1:
847 if parents[0].rev() >= ctx.rev() - 1:
848 return []
848 return []
849 return parents
849 return parents
850
850
851 def _show(self, ctx, copies, matchfn, props):
851 def _show(self, ctx, copies, matchfn, props):
852 '''show a single changeset or file revision'''
852 '''show a single changeset or file revision'''
853
853
854 showlist = templatekw.showlist
854 showlist = templatekw.showlist
855
855
856 # showparents() behaviour depends on ui trace level which
856 # showparents() behaviour depends on ui trace level which
857 # causes unexpected behaviours at templating level and makes
857 # causes unexpected behaviours at templating level and makes
858 # it harder to extract it in a standalone function. Its
858 # it harder to extract it in a standalone function. Its
859 # behaviour cannot be changed so leave it here for now.
859 # behaviour cannot be changed so leave it here for now.
860 def showparents(**args):
860 def showparents(**args):
861 ctx = args['ctx']
861 ctx = args['ctx']
862 parents = [[('rev', p.rev()), ('node', p.hex())]
862 parents = [[('rev', p.rev()), ('node', p.hex())]
863 for p in self._meaningful_parentrevs(ctx)]
863 for p in self._meaningful_parentrevs(ctx)]
864 return showlist('parent', parents, **args)
864 return showlist('parent', parents, **args)
865
865
866 props = props.copy()
866 props = props.copy()
867 props.update(templatekw.keywords)
867 props.update(templatekw.keywords)
868 props['parents'] = showparents
868 props['parents'] = showparents
869 props['templ'] = self.t
869 props['templ'] = self.t
870 props['ctx'] = ctx
870 props['ctx'] = ctx
871 props['repo'] = self.repo
871 props['repo'] = self.repo
872 props['revcache'] = {'copies': copies}
872 props['revcache'] = {'copies': copies}
873 props['cache'] = self.cache
873 props['cache'] = self.cache
874
874
875 # find correct templates for current mode
875 # find correct templates for current mode
876
876
877 tmplmodes = [
877 tmplmodes = [
878 (True, None),
878 (True, None),
879 (self.ui.verbose, 'verbose'),
879 (self.ui.verbose, 'verbose'),
880 (self.ui.quiet, 'quiet'),
880 (self.ui.quiet, 'quiet'),
881 (self.ui.debugflag, 'debug'),
881 (self.ui.debugflag, 'debug'),
882 ]
882 ]
883
883
884 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
884 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
885 for mode, postfix in tmplmodes:
885 for mode, postfix in tmplmodes:
886 for type in types:
886 for type in types:
887 cur = postfix and ('%s_%s' % (type, postfix)) or type
887 cur = postfix and ('%s_%s' % (type, postfix)) or type
888 if mode and cur in self.t:
888 if mode and cur in self.t:
889 types[type] = cur
889 types[type] = cur
890
890
891 try:
891 try:
892
892
893 # write header
893 # write header
894 if types['header']:
894 if types['header']:
895 h = templater.stringify(self.t(types['header'], **props))
895 h = templater.stringify(self.t(types['header'], **props))
896 if self.buffered:
896 if self.buffered:
897 self.header[ctx.rev()] = h
897 self.header[ctx.rev()] = h
898 else:
898 else:
899 if self.lastheader != h:
899 if self.lastheader != h:
900 self.lastheader = h
900 self.lastheader = h
901 self.ui.write(h)
901 self.ui.write(h)
902
902
903 # write changeset metadata, then patch if requested
903 # write changeset metadata, then patch if requested
904 key = types['changeset']
904 key = types['changeset']
905 self.ui.write(templater.stringify(self.t(key, **props)))
905 self.ui.write(templater.stringify(self.t(key, **props)))
906 self.showpatch(ctx.node(), matchfn)
906 self.showpatch(ctx.node(), matchfn)
907
907
908 if types['footer']:
908 if types['footer']:
909 if not self.footer:
909 if not self.footer:
910 self.footer = templater.stringify(self.t(types['footer'],
910 self.footer = templater.stringify(self.t(types['footer'],
911 **props))
911 **props))
912
912
913 except KeyError, inst:
913 except KeyError, inst:
914 msg = _("%s: no key named '%s'")
914 msg = _("%s: no key named '%s'")
915 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
915 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
916 except SyntaxError, inst:
916 except SyntaxError, inst:
917 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
917 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
918
918
919 def show_changeset(ui, repo, opts, buffered=False):
919 def show_changeset(ui, repo, opts, buffered=False):
920 """show one changeset using template or regular display.
920 """show one changeset using template or regular display.
921
921
922 Display format will be the first non-empty hit of:
922 Display format will be the first non-empty hit of:
923 1. option 'template'
923 1. option 'template'
924 2. option 'style'
924 2. option 'style'
925 3. [ui] setting 'logtemplate'
925 3. [ui] setting 'logtemplate'
926 4. [ui] setting 'style'
926 4. [ui] setting 'style'
927 If all of these values are either the unset or the empty string,
927 If all of these values are either the unset or the empty string,
928 regular display via changeset_printer() is done.
928 regular display via changeset_printer() is done.
929 """
929 """
930 # options
930 # options
931 patch = False
931 patch = False
932 if opts.get('patch') or opts.get('stat'):
932 if opts.get('patch') or opts.get('stat'):
933 patch = scmutil.matchall(repo)
933 patch = scmutil.matchall(repo)
934
934
935 tmpl = opts.get('template')
935 tmpl = opts.get('template')
936 style = None
936 style = None
937 if tmpl:
937 if tmpl:
938 tmpl = templater.parsestring(tmpl, quoted=False)
938 tmpl = templater.parsestring(tmpl, quoted=False)
939 else:
939 else:
940 style = opts.get('style')
940 style = opts.get('style')
941
941
942 # ui settings
942 # ui settings
943 if not (tmpl or style):
943 if not (tmpl or style):
944 tmpl = ui.config('ui', 'logtemplate')
944 tmpl = ui.config('ui', 'logtemplate')
945 if tmpl:
945 if tmpl:
946 try:
946 try:
947 tmpl = templater.parsestring(tmpl)
947 tmpl = templater.parsestring(tmpl)
948 except SyntaxError:
948 except SyntaxError:
949 tmpl = templater.parsestring(tmpl, quoted=False)
949 tmpl = templater.parsestring(tmpl, quoted=False)
950 else:
950 else:
951 style = util.expandpath(ui.config('ui', 'style', ''))
951 style = util.expandpath(ui.config('ui', 'style', ''))
952
952
953 if not (tmpl or style):
953 if not (tmpl or style):
954 return changeset_printer(ui, repo, patch, opts, buffered)
954 return changeset_printer(ui, repo, patch, opts, buffered)
955
955
956 mapfile = None
956 mapfile = None
957 if style and not tmpl:
957 if style and not tmpl:
958 mapfile = style
958 mapfile = style
959 if not os.path.split(mapfile)[0]:
959 if not os.path.split(mapfile)[0]:
960 mapname = (templater.templatepath('map-cmdline.' + mapfile)
960 mapname = (templater.templatepath('map-cmdline.' + mapfile)
961 or templater.templatepath(mapfile))
961 or templater.templatepath(mapfile))
962 if mapname:
962 if mapname:
963 mapfile = mapname
963 mapfile = mapname
964
964
965 try:
965 try:
966 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
966 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
967 except SyntaxError, inst:
967 except SyntaxError, inst:
968 raise util.Abort(inst.args[0])
968 raise util.Abort(inst.args[0])
969 if tmpl:
969 if tmpl:
970 t.use_template(tmpl)
970 t.use_template(tmpl)
971 return t
971 return t
972
972
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    df = util.matchdate(date)
    m = scmutil.matchall(repo)
    results = {}            # rev -> date tuple, filled by prep() below

    def prep(ctx, fns):
        d = ctx.date()
        if df(d[0]):
            results[ctx.rev()] = d

    # walkchangerevs yields revisions tip-first, so the first hit is tipmost
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in results:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(results[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
993
993
def increasingwindows(start, end, windowsize=8, sizelimit=512):
    """Yield (start, size) windows covering the revisions between start
    and end, doubling windowsize each step up to sizelimit.

    When start < end the windows walk forward (sizes capped by the
    remaining distance); otherwise they walk backward from start down
    toward end (exclusive). start == end yields nothing.
    """
    if start < end:
        while start < end:
            yield start, min(windowsize, end - start)
            start += windowsize
            if windowsize < sizelimit:
                windowsize *= 2
    else:
        while start > end:
            yield start, min(windowsize, start - end - 1)
            start -= windowsize
            if windowsize < sizelimit:
                windowsize *= 2
1007
1007
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone,
    signalling the caller to fall back to the slow changelog path."""
    pass
1010
1010
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, node) for explicitly matched files, then for
        # any copy sources discovered while walking (follow mode)
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not filelog:
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1108
1108
1109 def walkchangerevs(repo, match, opts, prepare):
1109 def walkchangerevs(repo, match, opts, prepare):
1110 '''Iterate over files and the revs in which they changed.
1110 '''Iterate over files and the revs in which they changed.
1111
1111
1112 Callers most commonly need to iterate backwards over the history
1112 Callers most commonly need to iterate backwards over the history
1113 in which they are interested. Doing so has awful (quadratic-looking)
1113 in which they are interested. Doing so has awful (quadratic-looking)
1114 performance, so we use iterators in a "windowed" way.
1114 performance, so we use iterators in a "windowed" way.
1115
1115
1116 We walk a window of revisions in the desired order. Within the
1116 We walk a window of revisions in the desired order. Within the
1117 window, we first walk forwards to gather data, then in the desired
1117 window, we first walk forwards to gather data, then in the desired
1118 order (usually backwards) to display it.
1118 order (usually backwards) to display it.
1119
1119
1120 This function returns an iterator yielding contexts. Before
1120 This function returns an iterator yielding contexts. Before
1121 yielding each context, the iterator will first call the prepare
1121 yielding each context, the iterator will first call the prepare
1122 function on each context in the window in forward order.'''
1122 function on each context in the window in forward order.'''
1123
1123
1124 follow = opts.get('follow') or opts.get('follow_first')
1124 follow = opts.get('follow') or opts.get('follow_first')
1125
1125
1126 if opts.get('rev'):
1126 if opts.get('rev'):
1127 revs = scmutil.revrange(repo, opts.get('rev'))
1127 revs = scmutil.revrange(repo, opts.get('rev'))
1128 elif follow:
1128 elif follow:
1129 revs = repo.revs('reverse(:.)')
1129 revs = repo.revs('reverse(:.)')
1130 else:
1130 else:
1131 revs = list(repo)
1131 revs = list(repo)
1132 revs.reverse()
1132 revs.reverse()
1133 if not revs:
1133 if not revs:
1134 return []
1134 return []
1135 wanted = set()
1135 wanted = set()
1136 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1136 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1137 fncache = {}
1137 fncache = {}
1138 change = repo.changectx
1138 change = repo.changectx
1139
1139
1140 # First step is to fill wanted, the set of revisions that we want to yield.
1140 # First step is to fill wanted, the set of revisions that we want to yield.
1141 # When it does not induce extra cost, we also fill fncache for revisions in
1141 # When it does not induce extra cost, we also fill fncache for revisions in
1142 # wanted: a cache of filenames that were changed (ctx.files()) and that
1142 # wanted: a cache of filenames that were changed (ctx.files()) and that
1143 # match the file filtering conditions.
1143 # match the file filtering conditions.
1144
1144
1145 if not slowpath and not match.files():
1145 if not slowpath and not match.files():
1146 # No files, no patterns. Display all revs.
1146 # No files, no patterns. Display all revs.
1147 wanted = set(revs)
1147 wanted = set(revs)
1148
1148
1149 if not slowpath and match.files():
1149 if not slowpath and match.files():
1150 # We only have to read through the filelog to find wanted revisions
1150 # We only have to read through the filelog to find wanted revisions
1151
1151
1152 try:
1152 try:
1153 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1153 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1154 except FileWalkError:
1154 except FileWalkError:
1155 slowpath = True
1155 slowpath = True
1156
1156
1157 # We decided to fall back to the slowpath because at least one
1157 # We decided to fall back to the slowpath because at least one
1158 # of the paths was not a file. Check to see if at least one of them
1158 # of the paths was not a file. Check to see if at least one of them
1159 # existed in history, otherwise simply return
1159 # existed in history, otherwise simply return
1160 for path in match.files():
1160 for path in match.files():
1161 if path == '.' or path in repo.store:
1161 if path == '.' or path in repo.store:
1162 break
1162 break
1163 else:
1163 else:
1164 return []
1164 return []
1165
1165
1166 if slowpath:
1166 if slowpath:
1167 # We have to read the changelog to match filenames against
1167 # We have to read the changelog to match filenames against
1168 # changed files
1168 # changed files
1169
1169
1170 if follow:
1170 if follow:
1171 raise util.Abort(_('can only follow copies/renames for explicit '
1171 raise util.Abort(_('can only follow copies/renames for explicit '
1172 'filenames'))
1172 'filenames'))
1173
1173
1174 # The slow path checks files modified in every changeset.
1174 # The slow path checks files modified in every changeset.
1175 for i in sorted(revs):
1175 for i in sorted(revs):
1176 ctx = change(i)
1176 ctx = change(i)
1177 matches = filter(match, ctx.files())
1177 matches = filter(match, ctx.files())
1178 if matches:
1178 if matches:
1179 fncache[i] = matches
1179 fncache[i] = matches
1180 wanted.add(i)
1180 wanted.add(i)
1181
1181
1182 class followfilter(object):
1182 class followfilter(object):
1183 def __init__(self, onlyfirst=False):
1183 def __init__(self, onlyfirst=False):
1184 self.startrev = nullrev
1184 self.startrev = nullrev
1185 self.roots = set()
1185 self.roots = set()
1186 self.onlyfirst = onlyfirst
1186 self.onlyfirst = onlyfirst
1187
1187
1188 def match(self, rev):
1188 def match(self, rev):
1189 def realparents(rev):
1189 def realparents(rev):
1190 if self.onlyfirst:
1190 if self.onlyfirst:
1191 return repo.changelog.parentrevs(rev)[0:1]
1191 return repo.changelog.parentrevs(rev)[0:1]
1192 else:
1192 else:
1193 return filter(lambda x: x != nullrev,
1193 return filter(lambda x: x != nullrev,
1194 repo.changelog.parentrevs(rev))
1194 repo.changelog.parentrevs(rev))
1195
1195
1196 if self.startrev == nullrev:
1196 if self.startrev == nullrev:
1197 self.startrev = rev
1197 self.startrev = rev
1198 return True
1198 return True
1199
1199
1200 if rev > self.startrev:
1200 if rev > self.startrev:
1201 # forward: all descendants
1201 # forward: all descendants
1202 if not self.roots:
1202 if not self.roots:
1203 self.roots.add(self.startrev)
1203 self.roots.add(self.startrev)
1204 for parent in realparents(rev):
1204 for parent in realparents(rev):
1205 if parent in self.roots:
1205 if parent in self.roots:
1206 self.roots.add(rev)
1206 self.roots.add(rev)
1207 return True
1207 return True
1208 else:
1208 else:
1209 # backwards: all parents
1209 # backwards: all parents
1210 if not self.roots:
1210 if not self.roots:
1211 self.roots.update(realparents(self.startrev))
1211 self.roots.update(realparents(self.startrev))
1212 if rev in self.roots:
1212 if rev in self.roots:
1213 self.roots.remove(rev)
1213 self.roots.remove(rev)
1214 self.roots.update(realparents(rev))
1214 self.roots.update(realparents(rev))
1215 return True
1215 return True
1216
1216
1217 return False
1217 return False
1218
1218
1219 # it might be worthwhile to do this in the iterator if the rev range
1219 # it might be worthwhile to do this in the iterator if the rev range
1220 # is descending and the prune args are all within that range
1220 # is descending and the prune args are all within that range
1221 for rev in opts.get('prune', ()):
1221 for rev in opts.get('prune', ()):
1222 rev = repo[rev].rev()
1222 rev = repo[rev].rev()
1223 ff = followfilter()
1223 ff = followfilter()
1224 stop = min(revs[0], revs[-1])
1224 stop = min(revs[0], revs[-1])
1225 for x in xrange(rev, stop - 1, -1):
1225 for x in xrange(rev, stop - 1, -1):
1226 if ff.match(x):
1226 if ff.match(x):
1227 wanted.discard(x)
1227 wanted.discard(x)
1228
1228
1229 # Choose a small initial window if we will probably only visit a
1229 # Choose a small initial window if we will probably only visit a
1230 # few commits.
1230 # few commits.
1231 limit = loglimit(opts)
1231 limit = loglimit(opts)
1232 windowsize = 8
1232 windowsize = 8
1233 if limit:
1233 if limit:
1234 windowsize = min(limit, windowsize)
1234 windowsize = min(limit, windowsize)
1235
1235
1236 # Now that wanted is correctly initialized, we can iterate over the
1236 # Now that wanted is correctly initialized, we can iterate over the
1237 # revision range, yielding only revisions in wanted.
1237 # revision range, yielding only revisions in wanted.
1238 def iterate():
1238 def iterate():
1239 if follow and not match.files():
1239 if follow and not match.files():
1240 ff = followfilter(onlyfirst=opts.get('follow_first'))
1240 ff = followfilter(onlyfirst=opts.get('follow_first'))
1241 def want(rev):
1241 def want(rev):
1242 return ff.match(rev) and rev in wanted
1242 return ff.match(rev) and rev in wanted
1243 else:
1243 else:
1244 def want(rev):
1244 def want(rev):
1245 return rev in wanted
1245 return rev in wanted
1246
1246
1247 for i, window in increasingwindows(0, len(revs), windowsize):
1247 for i, window in increasingwindows(0, len(revs), windowsize):
1248 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1248 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1249 for rev in sorted(nrevs):
1249 for rev in sorted(nrevs):
1250 fns = fncache.get(rev)
1250 fns = fncache.get(rev)
1251 ctx = change(rev)
1251 ctx = change(rev)
1252 if not fns:
1252 if not fns:
1253 def fns_generator():
1253 def fns_generator():
1254 for f in ctx.files():
1254 for f in ctx.files():
1255 if match(f):
1255 if match(f):
1256 yield f
1256 yield f
1257 fns = fns_generator()
1257 fns = fns_generator()
1258 prepare(ctx, fns)
1258 prepare(ctx, fns)
1259 for rev in nrevs:
1259 for rev in nrevs:
1260 yield change(rev)
1260 yield change(rev)
1261 return iterate()
1261 return iterate()
1262
1262
def _makegraphfilematcher(repo, pats, followfirst):
    # With --patch/--stat plus --follow FILE we must know which files of
    # each displayed revision to diff: the ancestors of FILE within that
    # revision. "fcache" maps linkrevs to those file names and is built
    # lazily by replaying the --follow traversal already done by the
    # revset (not strictly "correct", but good enough for display).
    fcache = {}
    fcacheready = [False]
    pctx = repo['.']
    wctx = repo[None]

    def populate():
        # Seed the cache with each pattern's filectx plus all of its
        # ancestors, keyed by linkrev.
        for fn in pats:
            fctx = pctx[fn]
            entries = [fctx]
            entries.extend(fctx.ancestors(followfirst=followfirst))
            for c in entries:
                fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Build the cache on first use only.
            fcacheready[0] = True
            populate()
        return scmutil.match(wctx, fcache.get(rev, []), default='path')

    return filematcher
1290
1290
def _makegraphlogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Maps a log option to its revset fragment and, for list-valued
    # options, the operator used to join the per-value fragments.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    # work on a copy: synthetic '_*' options are injected below and must
    # not leak back to the caller
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    followfirst = opts.get('follow_first') and 1 or 0
    # --follow with FILE behaviour depends on revs...
    startrev = revs[0]
    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    pctx = repo[None]
    match, pats = scmutil.matchandpats(pctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in pctx:
                raise util.Abort(_('cannot follow file not in parent '
                                   'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    if slowpath:
        # See walkchangerevs() slow path.
        #
        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
    else:
        if follow:
            fpats = ('_patsfollow', '_patsfollowfirst')
            fnopats = (('_ancestors', '_fancestors'),
                       ('_descendants', '_fdescendants'))
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                opts[fnopats[followdescendants][followfirst]] = str(startrev)
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        if follow:
            filematcher = _makegraphfilematcher(repo, pats, followfirst)
        else:
            # without --follow the same matcher applies to every revision
            filematcher = lambda rev: match

    # translate each recognized option into its revset fragment
    expr = []
    for op, val in opts.iteritems():
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
1423
1423
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    # empty repository: nothing to graph
    if not len(repo):
        return [], None, None
    limit = loglimit(opts)
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    possiblyunsorted = False # whether revs might need sorting
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
        # Don't sort here because _makegraphlogrevset might depend on the
        # order of revs
        possiblyunsorted = True
    else:
        if follow and len(repo) > 0:
            revs = repo.revs('reverse(:.)')
        else:
            revs = list(repo.changelog)
            revs.reverse()
    if not revs:
        return [], None, None
    expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
    if possiblyunsorted:
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B.
        # Sort again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # honour --limit after all filtering and ordering is done
        revs = revs[:limit]

    return revs, expr, filematcher
1469
1469
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    # Render each (rev, type, ctx, parents) tuple from 'dag' as an ASCII
    # graph row.  'seen' and 'state' carry column layout across rows for
    # graphmod's edge drawing.
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # node glyph: '@' for a working-directory parent, 'x' for an
        # obsolete changeset, 'o' otherwise
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # render the changeset into the displayer's buffer, then pull the
        # buffered text back out as individual lines for graph layout
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            # drop the trailing empty line produced by the final '\n'
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
1498
1498
def graphlog(ui, repo, *pats, **opts):
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    # --copies: build a rename-lookup function, bounded by the highest
    # requested revision when --rev was given
    getrenamed = None
    if opts.get('copies'):
        endrev = None
        revopt = opts.get('rev')
        if revopt:
            endrev = max(scmutil.revrange(repo, revopt)) + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, showparents,
                 graphmod.asciiedges, getrenamed, filematcher)
1514
1514
def checkunsupportedgraphflags(pats, opts):
    # Abort when an option that -G/--graph cannot honour was supplied.
    for name in ["newest_first"]:
        if opts.get(name):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % name.replace("_", "-"))
1520
1520
def graphrevs(repo, nodes, opts):
    # Walk 'nodes' newest-first, honouring --limit, and hand them to
    # graphmod for DAG iteration.  Note: 'nodes' is reversed in place.
    cap = loglimit(opts)
    nodes.reverse()
    limited = nodes if cap is None else nodes[:cap]
    return graphmod.nodes(repo, limited)
1527
1527
def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
    # Schedule files matched by 'match' for addition, recursing into
    # subrepositories, and return the list of names that could not be
    # added.  'prefix' is prepended when reporting paths to the user.
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # wrap match.bad so that names rejected by the matcher are collected
    # in 'bad' while still reaching the original callback
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit new names for case collisions on case-insensitive systems
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in repo.walk(match):
        exact = match.exact(f)
        # explicitly named files are always added; others only when not
        # already tracked (and when explicitonly is off)
        if exact or not explicitonly and f not in repo.dirstate:
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(join(f)))

    # recurse into subrepositories; without --subrepos only explicitly
    # matched files inside them are added
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if listsubrepos:
                bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
                                   False))
            else:
                bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
                                   True))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not dryrun:
        rejected = wctx.add(names, prefix)
        # only report rejections for files the user explicitly asked for
        bad.extend(f for f in rejected if f in match.files())
    return bad
1566
1566
def forget(ui, repo, match, prefix, explicitonly):
    # Stop tracking files matched by 'match' (without deleting them),
    # recursing into subrepositories.  Returns (bad, forgot): names that
    # could not be forgotten and names that were.
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # wrap match.bad so that names rejected by the matcher are collected
    # in 'bad' while still reaching the original callback
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    # modified + added + deleted + clean are the forgettable states
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(ui, submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about named files that exist on disk but were never
        # tracked, so the user knows nothing happened to them
        for f in match.files():
            if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
                if f not in forgot:
                    if os.path.exists(match.rel(join(f))):
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(join(f)))
                        bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(join(f)))

    rejected = wctx.forget(forget, prefix)
    # only report rejections for files the user explicitly asked for
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(forget)
    return bad, forgot
1608
1608
def duplicatecopies(repo, rev, fromrev):
    '''reproduce copies from fromrev to rev in the dirstate'''
    pathmap = copies.pathcopies(repo[fromrev], repo[rev])
    for dst, src in pathmap.iteritems():
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate; only record copies for tracked
        # (normal/merged/added) entries
        if repo.dirstate[dst] in "nma":
            repo.dirstate.copy(src, dst)
1616
1616
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)

    # extract addremove carefully -- this function can be called from a
    # command that doesn't support addremove
    if opts.get('addremove'):
        scmutil.addremove(repo, pats, opts)

    matcher = scmutil.match(repo[None], pats, opts)
    return commitfunc(ui, repo, message, matcher, opts)
1631
1631
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in any working-directory changes.

    First commits outstanding working-directory changes as a temporary
    ("intermediate") changeset, then builds a replacement changeset on top
    of ``old.p1()`` combining ``old`` with those changes.  Bookmarks on
    ``old`` are moved to the replacement; the superseded changesets are
    either marked obsolete (when obsolescence is enabled) or stripped.

    Returns the node id of the new changeset, or ``old.node()`` when the
    amend would change nothing.
    """
    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    # newid stays None until commitctx succeeds; the outer finally uses
    # that to decide whether the dirstate must be invalidated.
    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False  # suppress hooks for the temporary commit
            currentbookmark = repo._bookmarkcurrent
            try:
                # detach the current bookmark so the temporary commit does
                # not move it
                repo._bookmarkcurrent = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarkcurrent = currentbookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    # True when f is identical (content and flags) in ctx
                    # and base, or absent from both
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    # serve file contents from the intermediate commit;
                    # IOError tells memctx the file was removed
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        raise IOError
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        raise IOError

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editmsg = False
            if not message:
                editmsg = True
                message = old.description()

            # snapshot extra before adding amend_source noise, for the
            # no-op comparison below
            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra)
            if editmsg:
                new._text = commitforceeditor(repo, new, [])

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            # commit the replacement with the same phase as the original
            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                repo.ui.setconfig('phases', 'new-commit', old.phase())
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph)
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            #commit the whole amend process
            if obsolete._enabled and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
        if (not obsolete._enabled) and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        if newid is None:
            # the amend did not complete; discard in-memory dirstate changes
            repo.dirstate.invalidate()
        lockmod.release(lock, wlock)
    return newid
1813
1813
def commiteditor(repo, ctx, subs):
    """Return the changeset's existing description, or open the editor."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs)
1818
1818
def commitforceeditor(repo, ctx, subs):
    """Open the user's editor to obtain a commit message for ctx.

    Builds the familiar 'HG:' template (user, branch/bookmark info and the
    added/changed/removed file lists plus subrepos), runs the editor from
    the repository root, strips the 'HG:' lines from the result and returns
    the message.  Raises util.Abort if the resulting message is empty.
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append(_("HG: Leave message empty to abort commit."))
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'") % ctx.branch())
    if bookmarks.iscurrent(repo):
        edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")
    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    # restore the cwd even if the editor invocation raises (e.g. the user
    # aborts the edit); the original code leaked the chdir in that case
    try:
        text = repo.ui.edit("\n".join(edittext), ctx.user())
    finally:
        os.chdir(olddir)
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)

    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
1855
1855
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print status messages for a freshly committed changeset ``node``.

    Reports 'created new head' when the commit introduced an extra branch
    head, notes any reopened closed branch head, and echoes the committed
    changeset id in verbose/debug mode.
    """
    # avoid the mutable-default-argument pitfall of the former `opts={}`:
    # a shared dict default can leak state between calls
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
1901
1901
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files matched by ``pats`` to their state in ``ctx``.

    ``parents`` is the (parent, p2) pair of the working directory.  Files
    are classified against the dirstate status and a dispatch table decides
    per file whether to revert, add, remove/forget or undelete it, saving
    ``.orig`` backups where appropriate.  Matched subrepos are reverted
    recursively.  Honors the ``dry_run`` and ``no_backup`` options.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == parent:
        pmf = mf
    else:
        # parent manifest is loaded lazily below, only when needed
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.

    # maps abs path -> (relative path, was-matched-exactly)
    names = {}

    wlock = repo.wlock()
    try:
        # walk dirstate.

        m = scmutil.match(repo[None], pats, opts)
        m.bad = lambda x, y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)

        # walk target manifest.

        def badfn(path, msg):
            # only warn about a bad path if it is not covered by the
            # dirstate walk, a subrepo, or a directory prefix of a match
            if path in names:
                return
            if path in ctx.substate:
                return
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return
            ui.warn("%s: %s\n" % (m.rel(path), msg))

        m = scmutil.match(ctx, pats, opts)
        m.bad = badfn
        for abs in ctx.walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)

        # get the list of subrepos that must be reverted
        targetsubs = sorted(s for s in ctx.substate if m(s))
        m = scmutil.matchfiles(repo, names)
        changes = repo.status(match=m)[:4]
        modified, added, removed, deleted = map(set, changes)

        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        def removeforget(abs):
            # message depends on whether the file was only scheduled
            # for addition ('a') or is actually tracked
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')

        # each action is ([files], status message or message factory)
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))

        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )

        for abs, (rel, exact) in sorted(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            def handle(xlist, dobackup):
                # queue abs for the chosen action, optionally saving a
                # .orig backup, and print the per-file status message
                xlist[0].append(abs)
                if (dobackup and not opts.get('no_backup') and
                    os.path.lexists(target)):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.rename(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table:
                    continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # for-else: no status set matched, i.e. file is clean
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact:
                        ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf and mfentry:
                    # if version of file is same in parent and target
                    # manifests, do nothing
                    if (pmf[abs] != mfentry or
                        pmf.flags(abs) != mf.flags(abs)):
                        handle(revert, False)
                else:
                    handle(remove, False)

        if not opts.get('dry_run'):
            def checkout(f):
                # write f's content and flags from ctx into the working dir
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())

            audit_path = scmutil.pathauditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    repo.dirstate.drop(f)
                    continue
                audit_path(f)
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError:
                    # file may already be gone; removal is best-effort
                    pass
                repo.dirstate.remove(f)

            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)

            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)

            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)

            copied = copies.pathcopies(repo[parent], ctx)

            # restore copy/rename records for everything we wrote out
            for f in add[0] + undelete[0] + revert[0]:
                if f in copied:
                    repo.dirstate.copy(copied[f], f)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
    finally:
        wlock.release()
2088
2088
def command(table):
    '''returns a function object bound to table which can be used as
    a decorator for populating table as a command table'''

    def cmd(name, options=(), synopsis=None):
        def register(func):
            # store (func, options) and append the synopsis only when
            # one was supplied, matching the two table entry shapes
            entry = (func, list(options))
            if synopsis:
                entry = entry + (synopsis,)
            table[name] = entry
            return func
        return register

    return cmd
2103
2103
# a list of (ui, repo) functions called by commands.summary
# (extensions register extra summary output by appending to this hook list)
summaryhooks = util.hooks()
General Comments 0
You need to be logged in to leave comments. Login now